In my code, I am trying to extract data from a CSV file to use in the function, but it doesn't output anything and gives no error. My code works when I use plain NumPy arrays as inputs; I'm not sure why it doesn't work with pandas.
import numpy as np
import pandas as pd
import os
# change the current directory to the directory where the running script file is
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# finding best fit line for y=mx+b by iteration
def gradient_descent(x,y):
    m_iter = b_iter = 1 #starting point
    iteration = 10000
    n = len(x)
    learning_rate = 0.05
    last_mse = 10000
    #take baby steps to reach global minima
    for i in range(iteration):
        y_predicted = m_iter*x + b_iter
        #mse = 1/n*sum([value**2 for value in (y-y_predicted)]) # cost function to minimize
        mse = 1/n*sum((y-y_predicted)**2) # cost function to minimize
        if (last_mse - mse)/mse < 0.001:
            break
        # recall MSE formula is 1/n*sum((yi-y_predicted)^2), where y_predicted = m*x+b
        # using partial deriv of MSE formula, d/dm and d/db
        dm = -(2/n)*sum(x*(y-y_predicted))
        db = -(2/n)*sum((y-y_predicted))
        # use current predicted value to get the next value for prediction
        # by using learning rate
        m_iter = m_iter - learning_rate*dm
        b_iter = b_iter - learning_rate*db
        print('m is {}, b is {}, cost is {}, iteration {}'.format(m_iter,b_iter,mse,i))
        last_mse = mse
#x = np.array([1,2,3,4,5])
#y = np.array([5,7,8,10,13])
#gradient_descent(x,y)
df = pd.read_csv('Linear_Data.csv')
x = df['Area']
y = df['Price']
gradient_descent(x,y)
"My code works because I tried it with just NumPy arrays as inputs. Not sure why it doesn't work with pandas."
Well no, your code also works with pandas dataframes:
df = pd.DataFrame({'Area': [1,2,3,4,5], 'Price': [5,7,8,10,13]})
x = df['Area']
y = df['Price']
gradient_descent(x,y)
The above will give you the same output as with NumPy arrays.
Try checking what's in Linear_Data.csv and/or add some print statements in the gradient_descent function just to check your assumptions. I would suggest first of all adding a print statement before the condition with the break statement:
print(last_mse, mse)
if (last_mse - mse)/mse < 0.001:
    break
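For example, a quick sanity-check sketch (using the df from the question; the column names are the ones you already use) could be:
print(df.dtypes)   # did 'Area' and 'Price' load as numbers, or as object/strings?
print(df.head())
print(len(df))     # this becomes n inside gradient_descent
If a column came in as text, pd.to_numeric(df['Area']) will convert it before you call the function.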
I am having trouble with random_state in scipy.stats.truncnorm. Here is my code:
from scipy.stats import truncnorm
from numpy.random import default_rng
rg = default_rng( 12345 )
truncnorm.rvs(0.0,1.0,size=10, random_state=rg)
I get the following error:
File "test2.py", line 4, in <module>
truncnorm.rvs(0.0,1.0,size=10, random_state=rg)
File "/opt/anaconda3/envs/newbase/lib/python3.8/site-packages/scipy/stats/_distn_infrastructure.py", line 1004, in rvs
vals = self._rvs(*args, size=size, random_state=random_state)
File "/opt/anaconda3/envs/newbase/lib/python3.8/site-packages/scipy/stats/_continuous_distns.py", line 7641, in _rvs
out = self._rvs_scalar(a.item(), b.item(), size, random_state=random_state)
File "/opt/anaconda3/envs/newbase/lib/python3.8/site-packages/scipy/stats/_continuous_distns.py", line 7697, in _rvs_scalar
U = random_state.random_sample(N)
AttributeError: 'numpy.random._generator.Generator' object has no attribute 'random_sample'
I am using numpy 1.19.1 and scipy 1.5.0. The problem does not occur with scipy.stats.norm.rvs.
In scipy 1.7.1, the problem line has been changed to:
def _rvs_scalar(self, a, b, numsamples=None, random_state=None):
    if not numsamples:
        numsamples = 1

    # prepare sampling of rvs
    size1d = tuple(np.atleast_1d(numsamples))
    N = np.prod(size1d)  # number of rvs needed, reshape upon return

    # Calculate some rvs
    U = random_state.uniform(low=0, high=1, size=N)
    x = self._ppf(U, a, b)
    rvs = np.reshape(x, size1d)
    return rvs
Both have uniform, but rg does not have random_sample:
In [221]: rg.uniform
Out[221]: <function Generator.uniform>
In [222]: np.random.uniform
Out[222]: <function RandomState.uniform>
np.random.random_sample has this note:
.. note::
New code should use the ``random`` method of a ``default_rng()``
instance instead; please see the :ref:`random-quick-start`.
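Given that older scipy releases call random_state.random_sample internally, a minimal workaround sketch (assuming you stay on scipy 1.5.0 rather than upgrade) is to pass the legacy RandomState, which still has that method:
from scipy.stats import truncnorm
import numpy as np

rs = np.random.RandomState(12345)   # legacy generator, still provides random_sample()
print(truncnorm.rvs(0.0, 1.0, size=10, random_state=rs))
Upgrading to scipy >= 1.7 instead lets you pass a default_rng() Generator directly, since _rvs_scalar now calls random_state.uniform().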
I am trying to use the featuretools library to make new features on a simple dataset; however, whenever I try to use a bigger max_depth, nothing happens... Here is my code so far:
# imports
import featuretools as ft
# creating the EntitySet
es = ft.EntitySet()
es.entity_from_dataframe(entity_id='data', dataframe=data, make_index=True, index='index')
# Run deep feature synthesis with transformation primitives
feature_matrix, feature_defs = ft.dfs(entityset=es, target_entity='data', max_depth=3,
trans_primitives=['add_numeric', 'multiply_numeric'])
When I look at the features created, I get the basic things f1*f2 and f1+f2, but I would like more complex engineered features like f2*(f1+f2) or f1+(f2+f1). I thought increasing max_depth would do this but apparently not.
How could I do this, if at all?
I have managed to answer my own question, so I'll post it here.
You can create deeper features by running "Deep Feature Synthesis" on already generated features. Here is an example:
# imports
import featuretools as ft
# creating the EntitySet
es = ft.EntitySet()
es.entity_from_dataframe(entity_id='data', dataframe=data, make_index=True, index='index')
# Run deep feature synthesis with transformation primitives
feature_matrix, feature_defs = ft.dfs(entityset=es, target_entity='data',
trans_primitives=['add_numeric','multiply_numeric'])
# creating an EntitySet from the new features
deep_es = ft.EntitySet()
deep_es.entity_from_dataframe(entity_id='data', index='index', dataframe=feature_matrix)
# Run deep feature synthesis with transformation primitives
deep_feature_matrix, deep_feature_defs=ft.dfs(entityset=deep_es, target_entity='data',
trans_primitives=['add_numeric','multiply_numeric'])
Now, looking at the columns of deep_feature_matrix, here is what we see (assuming a dataset with 2 features):
"f1", "f2", "f1+f2", "f1*f2", "f1+f1*f2", "f1+f1+f2", "f1*f2+f1+f2", "f1*f2+f2", "f1+f2+f2", "f1*f1*f2", "f1*f1+f2", "f1*f2*f1+f2", "f1*f2*f2", "f1+f2*f2"
I have also made a function that automatically does this (includes a full docstring):
def auto_feature_engineering(X, y, selection_percent=0.1, selection_strategy="best", num_depth_steps=2, transformatives=['divide_numeric', 'multiply_numeric']):
    """
    Automatically perform deep feature engineering and
    feature selection.

    Parameters
    ----------
    X : pd.DataFrame
        Data to perform automatic feature engineering on.
    y : pd.DataFrame
        Target variable to find correlations of all
        features at each depth step to perform feature
        selection, y is not needed if selection_percent=1.
    selection_percent : float, optional
        Defines what percent of all the new features to
        keep for the next depth step.
    selection_strategy : {'best', 'random'}, optional
        Strategy used for feature selection, if 'best',
        it will select the best features for the next depth
        step, if 'random', it will select features at random.
    num_depth_steps : integer, optional
        The number of depth steps. Every depth step, the model
        generates brand new features from the features made in
        the last step, then selects a percent of these new
        features.
    transformatives : list, optional
        List of all possible transformations of the data to use
        when feature engineering, you can find the full list
        of possible transformations as well as what each one
        does using the following code:
        `ft.primitives.list_primitives()[ft.primitives.list_primitives()["type"]=="transform"]`
        make sure to `import featuretools as ft`.

    Returns
    -------
    pd.DataFrame
        a dataframe of the brand new features.
    """
    from sklearn.feature_selection import mutual_info_classif

    selected_feature_df = X.copy()
    for i in range(num_depth_steps):
        # Perform feature engineering
        es = ft.EntitySet()
        es.entity_from_dataframe(entity_id='data', dataframe=selected_feature_df,
                                 make_index=True, index='index')
        feature_matrix, feature_defs = ft.dfs(entityset=es, target_entity='data', trans_primitives=transformatives)

        # Remove features that are the same
        feature_corrs = feature_matrix.corr()[list(feature_matrix.keys())[0]]
        existing_corrs = []
        good_keys = []
        for key in feature_corrs.to_dict().keys():
            if feature_corrs[key] not in existing_corrs:
                existing_corrs.append(feature_corrs[key])
                good_keys.append(key)
        feature_matrix = feature_matrix[good_keys]

        # Remove illegal features
        legal_features = list(feature_matrix.columns)
        for feature in list(feature_matrix.columns):
            raw_feature_list = []
            for j in range(len(feature.split(" "))):
                if j%2==0:
                    raw_feature_list.append(feature.split(" ")[j])
            if len(raw_feature_list) > i+2: # num_depth_steps = 1, means max_num_raw_features_in_feature = 2
                legal_features.remove(feature)
        feature_matrix = feature_matrix[legal_features]

        # Perform feature selection
        if int(selection_percent)!=1:
            if selection_strategy=="best":
                corrs = mutual_info_classif(feature_matrix.reset_index(drop=True), y)
                corrs = pd.Series(corrs, name="")
                selected_corrs = corrs[corrs>=corrs.quantile(1-selection_percent)]
                selected_feature_df = feature_matrix.iloc[:, list(selected_corrs.keys())].reset_index(drop=True)
            elif selection_strategy=="random":
                selected_feature_df = feature_matrix.sample(frac=(selection_percent), axis=1).reset_index(drop=True)
            else:
                raise Exception("selection_strategy can be either 'best' or 'random', got '"+str(selection_strategy)+"'.")
        else:
            selected_feature_df = feature_matrix.reset_index(drop=True)

        if num_depth_steps!=1:
            rename_dict = {}
            for col in list(selected_feature_df.columns):
                rename_dict[col] = "("+col+")"
            selected_feature_df = selected_feature_df.rename(columns=rename_dict)

    if num_depth_steps!=1:
        rename_dict = {}
        for feature_name in list(selected_feature_df.columns):
            rename_dict[feature_name] = feature_name[int(num_depth_steps-1):-int(num_depth_steps-1)]
        selected_feature_df = selected_feature_df.rename(columns=rename_dict)

    return selected_feature_df
Here is an example of using it:
# Imports
>>> import seaborn as sns
>>> import pandas as pd
>>> import numpy as np
>>> from sklearn.preprocessing import OrdinalEncoder
# Load the penguins dataset
>>> penguins = sns.load_dataset("penguins")
>>> penguins.head()
species island bill_length_mm bill_depth_mm flipper_length_mm body_mass_g sex
0 Adelie Torgersen 39.1 18.7 181.0 3750.0 Male
1 Adelie Torgersen 39.5 17.4 186.0 3800.0 Female
2 Adelie Torgersen 40.3 18.0 195.0 3250.0 Female
3 Adelie Torgersen NaN NaN NaN NaN NaN
4 Adelie Torgersen 36.7 19.3 193.0 3450.0 Female
# Fill in NaN values of features using the distribution of the feature
>>> for feature in ["bill_length_mm", "bill_depth_mm", "flipper_length_mm", "body_mass_g", "sex"]:
... s = penguins[feature].value_counts(normalize=True)
... dist = penguins[feature].value_counts(normalize=True).values
... missing = penguins[feature].isnull()
... penguins.loc[missing, feature] = np.random.choice(s.index, size=len(penguins[missing]),p=s.values)
# Make X and y
>>> X = penguins[["bill_length_mm", "bill_depth_mm", "flipper_length_mm", "body_mass_g"]]
>>> y = penguins[["sex"]]
# Encode "sex" so that "Male" is 1 and "Female" is 0
>>> ord_enc = OrdinalEncoder()
>>> y = pd.DataFrame(ord_enc.fit_transform(y).astype(np.int8), columns=["sex"])
# Generate new dataset with more features
>>> penguins_with_more_features = auto_feature_engineering(X, y, selection_percent=1.)
# Correlations of the raw features
>>> find_correlations(X, y)
body_mass_g 0.422959
bill_depth_mm 0.353526
bill_length_mm 0.342109
flipper_length_mm 0.246944
Name: sex, dtype: float64
# Top 10% correlations of new features
>>> summarize_corr_series(find_top_percent(find_correlations(penguins_with_more_features, y), 0.1))
(flipper_length_mm / bill_depth_mm) / (body_mass_g): 0.7241123396175027
(bill_depth_mm * body_mass_g) / (flipper_length_mm): 0.7237223914820166
(bill_depth_mm * body_mass_g) * (bill_depth_mm): 0.7222108721971968
(bill_depth_mm * body_mass_g): 0.7202272416625914
(bill_depth_mm * body_mass_g) * (flipper_length_mm): 0.6425813490692588
(bill_depth_mm * bill_length_mm) * (body_mass_g): 0.6398235593646668
(bill_depth_mm * flipper_length_mm) * (flipper_length_mm): 0.6360645935216128
(bill_depth_mm * flipper_length_mm): 0.6083364815975281
(bill_depth_mm * body_mass_g) * (body_mass_g): 0.5888925994060027
In this example, we would like to predict the gender of penguins given their attributes body_mass_g, bill_depth_mm, bill_length_mm and flipper_length_mm.
You might notice these other mysterious functions I used in the example, namely find_correlations, summarize_corr_series and find_top_percent. These are other convenience functions I made to help summarize the results from auto_feature_engineering. Here is the code for them (note they haven't been documented):
def summarize_corr_series(feature_corr_series):
    max_feature_name_size = 0
    for key in feature_corr_series.to_dict().keys():
        if len(key) > max_feature_name_size:
            max_feature_name_size = len(key)

    max_new_feature_corr = feature_corr_series.max()
    for key in feature_corr_series.to_dict().keys():
        whitespace = []
        for i in range(max_feature_name_size-len(key)):
            whitespace.append(" ")
        whitespace = "".join(whitespace)
        print(key+": "+whitespace+str(abs(feature_corr_series[key])))

def find_top_percent(series, percent):
    return series[series>series.quantile(1-percent)]

def find_correlations(X, y):
    return abs(pd.concat([X.reset_index(drop=True), y.reset_index(drop=True)], axis=1).corr())[y.columns[0]].drop(y.columns[0]).sort_values(ascending=False)
It is really unfortunate that featuretools does not easily support this use case since it appears to be quite common. The best way I've found to do this is to create the first order features you want using the dfs function and then add the second order features you want manually.
For instance the MWE below (using the iris dataset) performs the AddNumeric primitive using dfs and then applies the DivideNumeric primitive to the newly created features using only the original features (and avoids the same base feature appearing multiple times in a transformed feature).
import numpy as np
import pandas as pd
import sklearn.datasets
import featuretools as ft
iris = sklearn.datasets.load_iris()
data = pd.DataFrame(
    data=np.c_[iris['data'], iris['target']],
    columns=iris['feature_names'] + ['target'],
)

ignore_cols = ['target']

entity_set = ft.EntitySet(id="iris")
entity_set.entity_from_dataframe(
    entity_id="iris_main",
    dataframe=data,
    index="index",
)

new_features = ft.dfs(
    entityset=entity_set,
    target_entity="iris_main",
    trans_primitives=["add_numeric"],
    features_only=True,
    primitive_options={
        "add_numeric": {
            "ignore_variables": {"iris_main": ignore_cols},
        },
    },
)

transformed_features = [i for i in new_features if isinstance(i, ft.feature_base.feature_base.TransformFeature)]
original_features = [i for i in new_features if isinstance(i, ft.feature_base.feature_base.IdentityFeature) and i.get_name() not in ignore_cols]

depth_two_features = []
for trans_feat in transformed_features:
    for orig_feat in original_features:
        if orig_feat.get_name() not in [i.get_name() for i in trans_feat.base_features]:
            feat = ft.Feature([trans_feat, orig_feat], primitive=ft.primitives.DivideNumeric)
            depth_two_features.append(feat)

data = ft.calculate_feature_matrix(
    features=original_features + transformed_features + depth_two_features,
    entityset=entity_set,
    verbose=True,
)
The benefit of this approach is that it gives you more fine-grained control to customise this how you want, and it avoids the computational cost of creating unnecessary features you don't need.
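For example, building on the get_name() calls in the snippet above, you could prune the second-order features before computing them (the filter below is purely illustrative):
# keep only the ratios involving sepal length, then compute just those
selected = [f for f in depth_two_features
            if 'sepal length (cm)' in f.get_name()]
data_subset = ft.calculate_feature_matrix(
    features=original_features + selected,
    entityset=entity_set,
)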
Strange error from numpy via matplotlib when trying to get a histogram of a tiny toy dataset. I'm just not sure how to interpret the error, which makes it hard to see what to do next.
Didn't find much related, though this nltk question and this gdsCAD question are superficially similar.
I intend the debugging info at bottom to be more helpful than the driver code, but if I've missed something, please ask. This is reproducible as part of an existing test suite.
    if n > 1:
        return diff(a[slice1]-a[slice2], n-1, axis=axis)
    else:
>       return a[slice1]-a[slice2]
E       TypeError: ufunc 'subtract' did not contain a loop with signature matching types dtype('<U1') dtype('<U1') dtype('<U1')
../py2.7.11-venv/lib/python2.7/site-packages/numpy/lib/function_base.py:1567: TypeError
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> entering PDB >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
> py2.7.11-venv/lib/python2.7/site-packages/numpy/lib/function_base.py(1567)diff()
-> return a[slice1]-a[slice2]
(Pdb) bt
[...]
py2.7.11-venv/lib/python2.7/site-packages/matplotlib/axes/_axes.py(5678)hist()
-> m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
py2.7.11-venv/lib/python2.7/site-packages/numpy/lib/function_base.py(606)histogram()
-> if (np.diff(bins) < 0).any():
> py2.7.11-venv/lib/python2.7/site-packages/numpy/lib/function_base.py(1567)diff()
-> return a[slice1]-a[slice2]
(Pdb) p numpy.__version__
'1.11.0'
(Pdb) p matplotlib.__version__
'1.4.3'
(Pdb) a
a = [u'A' u'B' u'C' u'D' u'E']
n = 1
axis = -1
(Pdb) p slice1
(slice(1, None, None),)
(Pdb) p slice2
(slice(None, -1, None),)
(Pdb)
I got the same error, but in my case I was subtracting a dict key from a dict value. I fixed it by subtracting the corresponding dict value from the other dict value:
cosine_sim = cosine_similarity(e_b-e_a, w-e_c)
Here I got the error because e_b, e_a and e_c are the embedding vectors for words b, a and c respectively. I didn't realize that w was a string; once I found that out, I fixed it with the following line:
cosine_sim = cosine_similarity(e_b-e_a, word_to_vec_map[w]-e_c)
Instead of subtracting the dict key, I now subtract the corresponding value for that key.
I had a similar issue where an integer in a row of a DataFrame I was iterating over was of type numpy.int64. I got the
TypeError: ufunc 'subtract' did not contain a loop with signature matching types dtype('<U1') dtype('<U1') dtype('<U1')
error when trying to subtract a float from it.
The easiest fix for me was to convert the row first, using pd.to_numeric(row).
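A minimal sketch of that fix (the values here are made up; the point is the dtype change):
import pandas as pd

row = pd.Series(['3', '7', '10'])   # values that came in as strings/objects
row = pd.to_numeric(row)            # now a numeric dtype
print(row - 0.5)                    # subtraction works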
Why is it applying diff to an array of strings?
I get an error at the same point, though with a different message
In [23]: a=np.array([u'A' u'B' u'C' u'D' u'E'])
In [24]: np.diff(a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-24-9d5a62fc3ff0> in <module>()
----> 1 np.diff(a)
C:\Users\paul\AppData\Local\Enthought\Canopy\User\lib\site-packages\numpy\lib\function_base.pyc in diff(a, n, axis)
1112 return diff(a[slice1]-a[slice2], n-1, axis=axis)
1113 else:
-> 1114 return a[slice1]-a[slice2]
1115
1116
TypeError: unsupported operand type(s) for -: 'numpy.ndarray' and 'numpy.ndarray'
Is this array the bins parameter? What do the docs say bins should be?
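To make the point concrete, here is a small sketch (the string bins array is made up) showing that np.diff only works once the values are numeric:
import numpy as np

a = np.array([u'A', u'B', u'C', u'D', u'E'])
# np.diff(a)                        # fails: '-' is not defined for string dtypes

bins = np.array(['1', '2', '3'])    # string bin edges can easily sneak in from a CSV
print(np.diff(bins.astype(float)))  # [1. 1.] -- convert to numeric first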
I am fairly new to this myself, but I had a similar error and found that it is due to a type-casting issue. I was trying to concatenate rather than take the difference, but I think the principle is the same here. I provided a similar answer on another question, so I hope that is OK.
In essence you need to use a different data type cast; in my case I needed str, not float, and I suspect yours is the same, so my suggested solution is below. I am sorry I cannot test it before suggesting, but I am unclear from your example what you were doing.
return diff(str(a[slice1])-str(a[slice2]), n-1, axis=axis)
Please see my example code below for the fix to my code; the change occurs on the third-to-last line. The code produces a basic random forest model.
import scipy
import math
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing, metrics, cross_validation
Data = pd.read_csv("Free_Energy_exp.csv", sep=",")
Data = Data.fillna(Data.mean()) # replace the NA values with the mean of the descriptor
header = Data.columns.values # Use the column headers as the descriptor labels
Data.head()
test_name = "Test.csv"
npArray = np.array(Data)
print header.shape
npheader = np.array(header[1:-1])
print("Array shape X = %d, Y = %d " % (npArray.shape))
datax, datay = npArray.shape
names = npArray[:,0]
X = npArray[:,1:-1].astype(float)
y = npArray[:,-1] .astype(float)
X = preprocessing.scale(X)
XTrain, XTest, yTrain, yTest = cross_validation.train_test_split(X,y, random_state=0)
# Predictions results initialised
RFpredictions = []
RF = RandomForestRegressor(n_estimators = 10, max_features = 5, max_depth = 5, random_state=0)
RF.fit(XTrain, yTrain) # Train the model
print("Training R2 = %5.2f" % RF.score(XTrain,yTrain))
RFpreds = RF.predict(XTest)
with open(test_name,'a') as fpred :
    lenpredictions = len(RFpreds)
    lentrue = yTest.shape[0]
    if lenpredictions == lentrue :
        fpred.write("Names/Label,, Prediction Random Forest,, True Value,\n")
        for i in range(0,lenpredictions) :
            fpred.write(RFpreds[i]+",,"+yTest[i]+",\n")
    else :
        print "ERROR - names, prediction and true value array size mismatch."
This leads to an error of:
Traceback (most recent call last):
File "min_example.py", line 40, in <module>
fpred.write(RFpreds[i]+",,"+yTest[i]+",\n")
TypeError: ufunc 'add' did not contain a loop with signature matching types dtype('S32') dtype('S32') dtype('S32')
The solution is to make each variable a str() type on the third-to-last line before writing to the file. No other changes to the code have been made from the above.
import scipy
import math
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing, metrics, cross_validation
Data = pd.read_csv("Free_Energy_exp.csv", sep=",")
Data = Data.fillna(Data.mean()) # replace the NA values with the mean of the descriptor
header = Data.columns.values # Use the column headers as the descriptor labels
Data.head()
test_name = "Test.csv"
npArray = np.array(Data)
print header.shape
npheader = np.array(header[1:-1])
print("Array shape X = %d, Y = %d " % (npArray.shape))
datax, datay = npArray.shape
names = npArray[:,0]
X = npArray[:,1:-1].astype(float)
y = npArray[:,-1] .astype(float)
X = preprocessing.scale(X)
XTrain, XTest, yTrain, yTest = cross_validation.train_test_split(X,y, random_state=0)
# Predictions results initialised
RFpredictions = []
RF = RandomForestRegressor(n_estimators = 10, max_features = 5, max_depth = 5, random_state=0)
RF.fit(XTrain, yTrain) # Train the model
print("Training R2 = %5.2f" % RF.score(XTrain,yTrain))
RFpreds = RF.predict(XTest)
with open(test_name,'a') as fpred :
    lenpredictions = len(RFpreds)
    lentrue = yTest.shape[0]
    if lenpredictions == lentrue :
        fpred.write("Names/Label,, Prediction Random Forest,, True Value,\n")
        for i in range(0,lenpredictions) :
            fpred.write(str(RFpreds[i])+",,"+str(yTest[i])+",\n")
    else :
        print "ERROR - names, prediction and true value array size mismatch."
These examples are from a larger piece of code, so I hope they are clear enough.
I think #James is right. I got stuck on the same error while working with numpy.polyval(). And yes, the solution is to use the same type of variables; you can typecast to bring all variables to the same type.
Below is an example:
import numpy
P = numpy.array(input().split(), float)
x = float(input())
print(numpy.polyval(P,x))
Here I used float as the output type, so even if the user inputs an int value (a whole number), the final answer will be typecast to float.
I ran into the same issue, but in my case a plain Python list was being used instead of a NumPy array. Using two NumPy arrays solved the issue for me.
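A minimal sketch of that situation (the numbers are purely illustrative):
import numpy as np

a = [5, 7, 8, 10, 13]              # plain Python lists
b = [1, 2, 3, 4, 5]
# a - b                            # TypeError: unsupported operand type(s) for -: 'list' and 'list'
print(np.array(a) - np.array(b))   # [4 5 5 6 8]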
For those who want to export a simple 3D NumPy array (along with axes) to a .vtk (or .vtr) file for post-processing and display in ParaView or Mayavi, there's a little module called PyEVTK that does exactly that. The module supports structured and unstructured data, etc.
Unfortunately, even though the code works fine on Unix-based systems, I couldn't make it work (it keeps crashing) on any Windows installation, which simply makes things complicated. I've contacted the developer, but his suggestions did not work.
Therefore my question is:
How can one use the numpy_support module (from vtk.util import numpy_support) to export a 3D array (the module itself doesn't support 3D arrays) to a .vtk file? Is there a simple way to do it without creating vtkDatasets etc.?
Thanks a lot!
It's been forever and I had entirely forgotten asking this question, but I ended up figuring it out. I've written a post about it on my blog (PyScience), providing a tutorial on how to convert between NumPy and VTK. Do take a look if interested:
pyscience.wordpress.com/2014/09/06/numpy-to-vtk-converting-your-numpy-arrays-to-vtk-arrays-and-files/
It's not a direct answer to your question, but if you have tvtk (if you have Mayavi, you should have it), you can use it to write your data to VTK format. (See: http://code.enthought.com/projects/files/ETS3_API/enthought.tvtk.misc.html )
It doesn't use PyEVTK, and it supports a broad range of data sources (more than just structured and unstructured grids), so it will probably work where other things won't.
As a quick example (Mayavi's mlab interface can make this much less verbose, especially if you're already using it):
import numpy as np
from enthought.tvtk.api import tvtk, write_data
data = np.random.random((10,10,10))
grid = tvtk.ImageData(spacing=(10, 5, -10), origin=(100, 350, 200),
                      dimensions=data.shape)
grid.point_data.scalars = np.ravel(data, order='F')
grid.point_data.scalars.name = 'Test Data'
# Writes legacy ".vtk" format if filename ends with "vtk", otherwise
# this will write data using the newer xml-based format.
write_data(grid, 'test.vtk')
And a portion of the output file:
# vtk DataFile Version 3.0
vtk output
ASCII
DATASET STRUCTURED_POINTS
DIMENSIONS 10 10 10
SPACING 10 5 -10
ORIGIN 100 350 200
POINT_DATA 1000
SCALARS Test%20Data double
LOOKUP_TABLE default
0.598189 0.228948 0.346975 0.948916 0.0109774 0.30281 0.643976 0.17398 0.374673
0.295613 0.664072 0.307974 0.802966 0.836823 0.827732 0.895217 0.104437 0.292796
0.604939 0.96141 0.0837524 0.498616 0.608173 0.446545 0.364019 0.222914 0.514992
...
...
TVTK from Mayavi has a beautiful way of writing VTK files. Here is a test example I have written for myself, following #Joe and the tvtk documentation. The advantage it has over evtk is the support for both the legacy ASCII format and the XML format. Hope it will help other people.
from tvtk.api import tvtk, write_data
import numpy as np
#data = np.random.random((3, 3, 3))
#
#i = tvtk.ImageData(spacing=(1, 1, 1), origin=(0, 0, 0))
#i.point_data.scalars = data.ravel()
#i.point_data.scalars.name = 'scalars'
#i.dimensions = data.shape
#
#w = tvtk.XMLImageDataWriter(input=i, file_name='spoints3d.vti')
#w.write()
points = np.array([[0,0,0], [1,0,0], [1,1,0], [0,1,0]], 'f')
(n1, n2) = points.shape
poly_edge = np.array([[0,1,2,3]])
print n1, n2
## Scalar Data
#temperature = np.array([10., 20., 30., 40.])
#pressure = np.random.rand(n1)
#
## Vector Data
#velocity = np.random.rand(n1,n2)
#force = np.random.rand(n1,n2)
#
##Tensor Data with
comp = 5
stress = np.random.rand(n1,comp)
#
#print stress.shape
## The TVTK dataset.
mesh = tvtk.PolyData(points=points, polys=poly_edge)
#
## Data 0 # scalar data
#mesh.point_data.scalars = temperature
#mesh.point_data.scalars.name = 'Temperature'
#
## Data 1 # additional scalar data
#mesh.point_data.add_array(pressure)
#mesh.point_data.get_array(1).name = 'Pressure'
#mesh.update()
#
## Data 2 # Vector data
#mesh.point_data.vectors = velocity
#mesh.point_data.vectors.name = 'Velocity'
#mesh.update()
#
## Data 3 additional vector data
#mesh.point_data.add_array( force)
#mesh.point_data.get_array(3).name = 'Force'
#mesh.update()
mesh.point_data.tensors = stress
mesh.point_data.tensors.name = 'Stress'
# Data 4 additional tensor Data
#mesh.point_data.add_array(stress)
#mesh.point_data.get_array(4).name = 'Stress'
#mesh.update()
write_data(mesh, 'polydata.vtk')
# XML format
# Method 1
#write_data(mesh, 'polydata')
# Method 2
#w = tvtk.XMLPolyDataWriter(input=mesh, file_name='polydata.vtk')
#w.write()
I know it is a bit late, and I do love your tutorials, #somada141. This should work too.
import vtk
from vtk.util.vtkConstants import VTK_UNSIGNED_CHAR

def numpy2VTK(img, spacing=[1.0, 1.0, 1.0]):
    # evolved from code from Stou S.,
    # on http://www.siafoo.net/snippet/314
    # This function, as the name suggests, converts a numpy array to VTK
    importer = vtk.vtkImageImport()

    img_data = img.astype('uint8')
    img_string = img_data.tostring() # type short
    dim = img.shape

    importer.CopyImportVoidPointer(img_string, len(img_string))
    importer.SetDataScalarType(VTK_UNSIGNED_CHAR)
    importer.SetNumberOfScalarComponents(1)

    extent = importer.GetDataExtent()
    importer.SetDataExtent(extent[0], extent[0] + dim[2] - 1,
                           extent[2], extent[2] + dim[1] - 1,
                           extent[4], extent[4] + dim[0] - 1)
    importer.SetWholeExtent(extent[0], extent[0] + dim[2] - 1,
                            extent[2], extent[2] + dim[1] - 1,
                            extent[4], extent[4] + dim[0] - 1)

    importer.SetDataSpacing(spacing[0], spacing[1], spacing[2])
    importer.SetDataOrigin(0, 0, 0)

    return importer
Hope it helps!
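For reference, here is a minimal sketch of how the importer returned by numpy2VTK could be written to disk; the writer class and file name below are my own assumptions, not part of the answer above:
import numpy as np
import vtk

img = (np.random.random((30, 30, 30)) * 255).astype('uint8')   # hypothetical volume
importer = numpy2VTK(img, spacing=[1.0, 1.0, 1.0])

writer = vtk.vtkXMLImageDataWriter()
writer.SetFileName('converted.vti')
writer.SetInputConnection(importer.GetOutputPort())
writer.Write()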
Here's a SimpleITK version with the function load_itk taken from here:
import sys

import SimpleITK as sitk
import numpy as np

if len(sys.argv) < 3:
    print('Wrong number of arguments.', file=sys.stderr)
    print('Usage: ' + __file__ + ' input_sitk_file' + ' output_sitk_file', file=sys.stderr)
    sys.exit(1)

def quick_read(filename):
    # Read image information without reading the bulk data.
    file_reader = sitk.ImageFileReader()
    file_reader.SetFileName(filename)
    file_reader.ReadImageInformation()
    print('image size: {0}\nimage spacing: {1}'.format(file_reader.GetSize(), file_reader.GetSpacing()))
    # Some files have a rich meta-data dictionary (e.g. DICOM)
    for key in file_reader.GetMetaDataKeys():
        print(key + ': ' + file_reader.GetMetaData(key))

def load_itk(filename):
    # Reads the image using SimpleITK
    itkimage = sitk.ReadImage(filename)
    # Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
    data = sitk.GetArrayFromImage(itkimage)
    # Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
    origin = np.array(list(reversed(itkimage.GetOrigin())))
    # Read the spacing along each dimension
    spacing = np.array(list(reversed(itkimage.GetSpacing())))
    return data, origin, spacing

def convert(data, output_filename):
    image = sitk.GetImageFromArray(data)
    writer = sitk.ImageFileWriter()
    writer.SetFileName(output_filename)
    writer.Execute(image)

def wait():
    print('Press Enter to load & convert or exit using Ctrl+C')
    input()

quick_read(sys.argv[1])
print('-'*20)
wait()
data, origin, spacing = load_itk(sys.argv[1])
convert(data, sys.argv[2])