I have the following code, and I think I'm doing something wrong with how I feed the data into the neural net (the network itself is only provisional).
Here is my code:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import tflearn
data = pd.read_csv('winequality-white.csv', sep=';')
X = data[['fixed acidity',
'volatile acidity',
'citric acid',
'residual sugar',
'chlorides',
'free sulfur dioxide',
'total sulfur dioxide',
'density',
'pH',
'sulphates',
'alcohol']]
y = data['quality']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.1)
# Build neural network
net = tflearn.input_data(shape=[None, 11])
net = tflearn.fully_connected(net, 110)
net = tflearn.regression(net)
# Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(X_train, y_train, n_epoch=10, batch_size=1, show_metric=True)
The data I use looks like this:
"fixed acidity";"volatile acidity";"citric acid";"residual sugar";"chlorides";"free sulfur dioxide";"total sulfur dioxide";"density";"pH";"sulphates";"alcohol";"quality"
7;0.27;0.36;20.7;0.045;45;170;1.001;3;0.45;8.8;6
6.3;0.3;0.34;1.6;0.049;14;132;0.994;3.3;0.49;9.5;6
8.1;0.28;0.4;6.9;0.05;30;97;0.9951;3.26;0.44;10.1;6
7.2;0.23;0.32;8.5;0.058;47;186;0.9956;3.19;0.4;9.9;6
7.2;0.23;0.32;8.5;0.058;47;186;0.9956;3.19;0.4;9.9;6
8.1;0.28;0.4;6.9;0.05;30;97;0.9951;3.26;0.44;10.1;6
6.2;0.32;0.16;7;0.045;30;136;0.9949;3.18;0.47;9.6;6
...
This is the error I get:
=== RESTART: /home/tux/Schreibtisch/Projects/Wineclassifier/classifier.py ===
hdf5 is not supported on this machine (please install/reinstall h5py for optimal experience)
---------------------------------
Run id: Q6AJQP
Log directory: /tmp/tflearn_logs/
---------------------------------
Exception in thread Thread-3:
Traceback (most recent call last):
File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
self.run()
File "/usr/lib/python3.5/threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.5/dist-packages/tflearn/data_flow.py", line 187, in fill_feed_dict_queue
data = self.retrieve_data(batch_ids)
File "/usr/local/lib/python3.5/dist-packages/tflearn/data_flow.py", line 222, in retrieve_data
utils.slice_array(self.feed_dict[key], batch_ids)
File "/usr/local/lib/python3.5/dist-packages/tflearn/utils.py", line 187, in slice_array
return X[start]
File "/usr/local/lib/python3.5/dist-packages/pandas/core/frame.py", line 2056, in __getitem__
return self._getitem_array(key)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/frame.py", line 2100, in _getitem_array
indexer = self.loc._convert_to_indexer(key, axis=1)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/indexing.py", line 1231, in _convert_to_indexer
raise KeyError('%s not in index' % objarr[mask])
KeyError: '[3374] not in index'
Training samples: 4408
Validation samples: 0
--
Does anybody have a clue what's wrong?
Place the following code at the beginning of your script to reset the graph. If you run the code interactively in an IPython environment, the TensorFlow graph can get really messy.
import tensorflow as tf
tf.reset_default_graph()
Many people are in the same shoes as you. Refer to this GitHub issue for more detail.
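Applied to the script above, the reset would sit at the very top of classifier.py, before the network is built. A minimal sketch (everything else stays as posted):
import tensorflow as tf
import tflearn

# clear any graph left over from a previous (interactive) run
tf.reset_default_graph()

# ... load and split the data as before, then build the network
net = tflearn.input_data(shape=[None, 11])
net = tflearn.fully_connected(net, 110)
net = tflearn.regression(net)
model = tflearn.DNN(net)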
I would like to implement a custom loss function, and I am using TensorFlow with the Keras backend.
In my loss function, for each training sample (a 2D matrix of size 2048x192) I would like to add a band-passed version of the corresponding training sample as a constant (non-trainable) value.
I implemented the band-pass filter based on How to implement band-pass Butterworth filter with Scipy.signal.butter:
from scipy.signal import butter, sosfiltfilt
def butter_bandpass_sos(lowcut, highcut, fs, order=5):
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    sos = butter(order, [low, high], analog=False, btype='band', output='sos')
    return sos

def butter_bandpass_filter_sos(data, lowcut, highcut, fs, order=5):
    sos = butter_bandpass_sos(lowcut, highcut, fs, order=order)
    y = sosfiltfilt(sos, data)
    return y
and for the loss function based on Adding a constant to Loss function in Tensorflow, I implemented:
from tensorflow.python.keras import backend
import tensorflow as tf
lowcut = 2.5e6
highcut = 7.5e6
order = 5
fs = 40e6
def HP_func(mat):
    for i in range(0, 192):
        RF_ch = mat[:, i]
        y = butter_bandpass_filter_sos(RF_ch, lowcut, highcut, fs, order=order)
        mat_band_sos[:, i] = y
    return mat_band_sos

def my_custom_loss_HF(y_true, y_pred):
    HF_mat = HP_func(y_true)
    loss = backend.sqrt(tf.keras.losses.mean_squared_error(y_true, y_pred)) + HF_mat
    return loss
I have three branches and therefore three losses:
model.compile(loss=['mean_squared_error', my_custom_loss_HF, 'mean_squared_error'],
              loss_weights=[1.0, 1.0, 1.0],
              optimizer='Adam',
              metrics=['mae', rmse])
but I am getting this error:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-19-01efc0717223>", line 6, in <module>
metrics=['mae', rmse])
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py", line 457, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 337, in compile
self._compile_weights_loss_and_weighted_metrics()
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py", line 457, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1710, in _compile_weights_loss_and_weighted_metrics
self.total_loss = self._prepare_total_loss(masks)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1770, in _prepare_total_loss
per_sample_losses = loss_fn.call(y_true, y_pred)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/losses.py", line 215, in call
return self.fn(y_true, y_pred, **self._fn_kwargs)
File "<ipython-input-18-a4f1cf924d3f>", line 3, in my_custom_loss_HF
HF_mat = HP_fun(y_true)
File "<ipython-input-17-74a2f0e736b9>", line 19, in HP_fun
y = butter_bandpass_filter_sos(RF_ch, lowcut, highcut, fs, order=order)
File "<ipython-input-2-4e34aa35b4cd>", line 69, in butter_bandpass_filter_sos
y = sosfiltfilt(sos, data)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/scipy/signal/signaltools.py", line 4131, in sosfiltfilt
x = _validate_x(x)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/scipy/signal/signaltools.py", line 3926, in _validate_x
raise ValueError('x must be at least 1D')
ValueError: x must be at least 1D
How can I use the scipy function in my loss function?
You need to rewrite the function to work with tensors; otherwise it has no gradient and the network can't be trained. You can only use functions that operate on tensors, such as the Keras backend ops. Your function even contains a Python for loop; keep that kind of logic out of a loss function.
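To illustrate the idea only (this is my own rough sketch, not an exact replacement for the Butterworth filter): the band-pass can be approximated with a hard FFT mask built entirely from TensorFlow ops, assuming y_true has shape (batch, 2048, 192), the fs/lowcut/highcut values above, and a TF version that provides tf.signal:
import tensorflow as tf
from tensorflow.python.keras import backend

lowcut, highcut, fs, n_samples = 2.5e6, 7.5e6, 40e6, 2048

def band_component(y):
    # crude band-pass: FFT along the 2048-sample axis, zero the out-of-band bins, inverse FFT
    y = tf.transpose(y, [0, 2, 1])                   # (batch, 192, 2048)
    spec = tf.signal.rfft(tf.cast(y, tf.float32))    # (batch, 192, 1025)
    freqs = tf.range(n_samples // 2 + 1, dtype=tf.float32) * fs / n_samples
    in_band = (freqs >= lowcut) & (freqs <= highcut)
    mask = tf.cast(tf.cast(in_band, tf.float32), tf.complex64)
    filtered = tf.signal.irfft(spec * mask)          # back to (batch, 192, 2048)
    return tf.transpose(filtered, [0, 2, 1])         # (batch, 2048, 192)

def my_custom_loss_HF(y_true, y_pred):
    rmse = backend.sqrt(tf.keras.losses.mean_squared_error(y_true, y_pred))
    penalty = backend.mean(backend.abs(band_component(y_true)), axis=-1)
    return rmse + penalty
Every operation here stays inside the TensorFlow graph, so the loss remains differentiable with respect to y_pred and there is no per-column Python loop.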
I am trying to convert an ONNX model to a TensorFlow .pb model, but when I call the prepare function I get this error: NotImplementedError: Einsum is not implemented.
Are there any suggestions for solving this problem? Thanks!
By the way, here is the code I use to convert the model:
import onnx
from onnx_tf.backend import prepare
def onnx2pb(model, pb_output_path, opset_version=12):
    tf_exp = prepare(onnx_model)  # prepare tf representation
    tf_exp.export_graph(pb_output_path)  # export the model
onnx_input_path = './model.onnx'
pb_output_path = './model.pb'
onnx_model = onnx.load(onnx_input_path)
onnx2pb(onnx_model, pb_output_path)
Below is the error report:
tf_exp = prepare(onnx_model) # prepare tf representation
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/onnx_tf/backend.py", line 66, in prepare
return cls.onnx_model_to_tensorflow_rep(model, strict)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/onnx_tf/backend.py", line 86, in onnx_model_to_tensorflow_rep
return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/onnx_tf/backend.py", line 143, in _onnx_graph_to_tensorflow_rep
output_ops = cls._onnx_node_to_tensorflow_op(onnx_node,
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/onnx_tf/backend.py", line 254, in _onnx_node_to_tensorflow_op
exception.OP_UNIMPLEMENTED_EXCEPT(node.op_type)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/onnx_tf/common/exception.py", line 30, in __call__
super(OpUnimplementedException, self).__call__(op, version, domain)
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/onnx_tf/common/exception.py", line 12, in __call__
raise self._func(self.get_message(*args, **kwargs))
NotImplementedError: Einsum is not implemented.
[Finished in 5.3s with exit code 1]
I solved this by replacing torch.einsum with the corresponding matrix multiplication :)
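For illustration (this is not the poster's actual model, just a hypothetical example of the pattern): a batched einsum contraction can usually be rewritten with torch.matmul before exporting to ONNX, which onnx-tf can handle:
import torch

a = torch.randn(8, 64, 32)
b = torch.randn(8, 32, 16)

# einsum form that onnx-tf could not convert
out_einsum = torch.einsum('bij,bjk->bik', a, b)

# equivalent batched matrix multiplication
out_matmul = torch.matmul(a, b)

print(torch.allclose(out_einsum, out_matmul))  # prints True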
I am very new to Flask. I developed a document classification model using a CNN in Keras with Python 3. Below is the code I am using in my app.py file on a Windows machine.
I got the code example from here and adapted it to suit my needs.
import os
from flask import jsonify
from flask import request
from flask import Flask
import numpy as np
from keras.models import model_from_json
from keras.models import load_model
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
# start Flask application
app = Flask(__name__)
path = 'C:/Users/user/Model/'
json_file = open(path+'/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
keras_model_loaded = model_from_json(loaded_model_json)
keras_model_loaded.load_weights(path+'/model.h5')
print('Model loaded...')
def preprocess_text(text,num_max = 1000,max_review_length = 100):
    tok = Tokenizer(num_words=num_max)
    tok.fit_on_texts(texts)
    cnn_texts_seq = tok.texts_to_sequences(texts)
    cnn_texts_mat = sequence.pad_sequences(cnn_texts_seq,maxlen=max_review_length)
    return cnn_texts_mat
# URL that we'll use to make predictions using get and post
@app.route('/predict',methods=['GET','POST'])
def predict():
    try:
        text = request.args.get('text')
        x = preprocess_text(text)
        y = int(np.round(keras_model_loaded.predict(x)))
        # print(y)
        return jsonify({'prediction': str(y)})
    except:
        response = jsonify({'error': 'problem predicting'})
        response.status_code = 400
        return response
if __name__ == "__main__":
    port = int(os.environ.get('PORT', 5000))
    # Run locally
    app.run(host='0.0.0.0', port=port)
On my Windows machine, I navigate in the console to the path where I have saved the app.py file and execute the command py -3.6 app.py.
When I go to the URL http://localhost:5000/predict and type the following in the browser:
http://localhost:5000/predict?text=I've had my Fire HD 8 two weeks now and I love it. This tablet is a great value. We are Prime Members and that is where this tablet SHINES.
it does not give me any class as output; instead I get {"error":"problem predicting"}.
Any help on how to fix this?
Edit: I removed the try/except block in the predict function. Below is how the predict function looks now:
def predict():
    text = request.args.get('text')
    x = preprocess_text(text)
    y = int(np.round(keras_model_loaded.predict(x)))
    return jsonify({'prediction': str(y)})
Now I am getting an exception. The error message is:
[2018-05-28 18:33:59,008] ERROR in app: Exception on /predict [GET]
Traceback (most recent call last):
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\flask\app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\flask\app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\flask\app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\flask\_compat.py", line 35, in reraise
raise value
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\flask\app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\flask\app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "app.py", line 59, in predict
x = preprocess_text(text)
File "app.py", line 37, in preprocess_text
tok.fit_on_texts(texts)
NameError: name 'texts' is not defined
127.0.0.1 - - [28/May/2018 18:33:59] "GET /predict?text=I%27ve%20had%20my%20Fire%20HD%208%20two%20weeks%20now%20and%20I%20love%20it.%20This%20tablet%20is%20a%20great%20value.%20We%20are%20Prime%20Members%20and%20that%20is%20where%20this%20tablet%20SHINES. HTTP/1.1" 500 -
Edit 2: I have edited the code to:
def preprocess_text(texts,num_max = 1000,max_review_length = 100):
    tok = Tokenizer(num_words=num_max)
    tok.fit_on_texts(texts)
    cnn_texts_seq = tok.texts_to_sequences(texts)
    cnn_texts_mat = pad_sequences(cnn_texts_seq,maxlen=max_review_length)
    return cnn_texts_mat
# URL that we'll use to make predictions using get and post
@app.route('/predict',methods=['GET','POST'])
def predict():
    text = request.args.get('text')
    x = preprocess_text(text)
    y = keras_model_loaded.predict(x)
    return jsonify({'prediction': str(y)})
and now the error message is:
packages\tensorflow\python\framework\ops.py", line 3402, in _as_graph_element_locked
raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("output/Sigmoid:0", shape=(?, 1), dtype=float32) is not an element of this graph.
127.0.0.1 - - [28/May/2018 19:39:11] "GET /predict?text=I%27ve%20had%20my%20Fire%20HD%208%20two%20weeks%20now%20and%20I%20love%20it.%20This%20tablet%20is%20a%20great%20value.%20We%20are%20Prime%20Members%20and%20that%20is%20where%20this%20tablet%20SHINES. HTTP/1.1" 500 -
I am unable to understand and debug this error, and I am not sure what it means. Can anyone help me understand it and suggest a solution?
Also, I am unable to post the entire error message on Stack Overflow, as most of my question gets flagged as code.
Thanks!!
It is what I guessed: there is a problem when TensorFlow is used across threads with Flask. Here is a fix for it:
import tensorflow as tf
# ...
graph = tf.get_default_graph()
def predict():
    text = request.args.get('text')
    x = preprocess_text(text)
    with graph.as_default():
        y = int(np.round(keras_model_loaded.predict(x)))
    return jsonify({'prediction': str(y)})
This wraps the prediction so that it explicitly runs against the default graph, i.e. the graph the model was loaded into.
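In the app.py above, that means capturing the graph right where the model is loaded and reusing it inside the route. A sketch that reuses the poster's names (loaded_model_json, path, preprocess_text, app, etc. as defined earlier):
import tensorflow as tf

keras_model_loaded = model_from_json(loaded_model_json)
keras_model_loaded.load_weights(path + '/model.h5')
graph = tf.get_default_graph()  # the graph the weights were just loaded into
print('Model loaded...')

@app.route('/predict', methods=['GET', 'POST'])
def predict():
    text = request.args.get('text')
    x = preprocess_text(text)
    with graph.as_default():  # run the prediction on that same graph
        y = keras_model_loaded.predict(x)
    return jsonify({'prediction': str(y)})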
The code is shown below, and the error message is also shown below:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import urllib.request
import tensorflow as tf
import numpy as np
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_RRL = "http://download.tensorflow.org/data/iris_test.csv"
if not os.path.exists(IRIS_TRAINING):
    raw = urllib.request.urlopen(IRIS_TRAINING_URL).read()
    with open(IRIS_TRAINING, 'w') as f:
        f.write(raw)

if not os.path.exists(IRIS_TEST):
    raw = urllib.request.urlopen(IRIS_TEST_RRL).read()
    with open(IRIS_TEST, 'w') as f:
        f.write(raw)
# load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename=IRIS_TRAINING,
    target_dtype=np.int,
    features_dtype=np.float32)

test_set = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename=IRIS_TEST,
    target_dtype=np.int,
    features_dtype=np.float32)
# Specify that all features have real_valued data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layers DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 30],
                                            n_class=3,
                                            model_dir="/tem/iris_model")
# Define the training inputs
def get_train_inputs():
    x = tf.constant(training_set.data)
    y = tf.constant(training_set.target)
    return x, y
# Fit model
classifier.fit(input_fn=get_train_inputs(), steps=2000)
# Define the test inputs
def get_test_inputs():
    x = tf.constant(test_set.data)
    y = tf.constant(test_set.target)
    return x, y
# Evaluate accuracy
accuracy_score = classifier.evaluate(input_fn=get_test_inputs(), steps=1)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
This prints the following stack-trace:
Traceback (most recent call last):
File "/home/skyfacon/PycharmProjects/LinearFitting/IrisClassification.py", line 35, in <module>
features_dtype=np.float32
File "/home/skyfacon/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 69, in load_csv_without_header
data.append(np.asarray(row, dtype=features_dtype))
File "/home/skyfacon/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/numeric.py", line 531, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: could not convert string to float: 'setosa'
Process finished with exit code 1
I would like to know which page you are using as the tutorial for this, because the first page that comes up when searching Google is this:
https://www.tensorflow.org/get_started/tflearn
The difference between that tutorial and what you posted is tf.contrib.learn.datasets.base.load_csv_with_header versus tf.contrib.learn.datasets.base.load_csv_without_header.
The iris data at the URL you specified contains a header row (which is where the string 'setosa' in the error comes from), but you are trying to load it as a file without a header. Hence the strings in the header cannot be converted to float, and you get the error.
Change your code to:
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TRAINING,
    target_dtype=np.int,
    features_dtype=np.float32)

test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TEST,
    target_dtype=np.int,
    features_dtype=np.float32)
I am confused why the following code returns this error message:
Traceback (most recent call last):
File "/Users/Desktop/TestPython/tftest.py", line 46, in <module>
main(sys.argv[1:])
File "/Users/Desktop/TestPython/tftest.py", line 35, in main
result = tf.while_loop(Cond_f2, Body_f1, loop_vars=loopvars)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2518, in while_loop
result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2356, in BuildLoop
pred, body, original_loop_vars, loop_vars, shape_invariants)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2292, in _BuildLoop
c = ops.convert_to_tensor(pred(*packed_vars))
File "/Users/Desktop/TestPython/tftest.py", line 18, in Cond_f2
boln = tf.less(tf.cast(tf.constant(ind), dtype=tf.int32), tf.cast(tf.constant(N), dtype=tf.int32))
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/framework/constant_op.py", line 163, in constant
tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape))
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/framework/tensor_util.py", line 353, in make_tensor_proto
_AssertCompatible(values, dtype)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/framework/tensor_util.py", line 287, in _AssertCompatible
raise TypeError("List of Tensors when single Tensor expected")
TypeError: List of Tensors when single Tensor expected
I would appreciate it if someone could help me fix this error. Thanks!
from math import *
import numpy as np
import sys
import tensorflow as tf
def Body_f1(n, ind, N, T):
    # Compute trace
    a = tf.trace(tf.random_normal(0.0, 1.0, (n, n)))
    # Update trace
    a = tf.cast(a, dtype=T.dtype)
    T = tf.scatter_update(T, ind, a)
    # Update index
    ind = ind + 1
    return n, ind, N, T

def Cond_f2(n, ind, N, T):
    boln = tf.less(tf.cast(tf.constant(ind), dtype=tf.int32), tf.cast(tf.constant(N), dtype=tf.int32))
    return boln

def main(argv):
    # Open tensorflow session
    sess = tf.Session()
    # Parameters
    N = 10
    T = tf.zeros((N), dtype=tf.float64)
    n = 4
    ind = 0
    # While loop
    loopvars = [n, ind, N, T]
    result = tf.while_loop(Cond_f2, Body_f1, loop_vars=loopvars, shape_invariants=None,
                           parallel_iterations=1, back_prop=False, swap_memory=False, name=None)
    trace = result[3]
    trace = sess.run(trace)
    print trace
    print 'Done!'
    # Close tensorflow session
    if session==None:
        sess.close()

if __name__ == "__main__":
    main(sys.argv[1:])
Update: I have added the full error message above. I am not sure why I get it. Does loop_vars expect a single tensor rather than a list of tensors? I hope not.
tf.constant expects a non-Tensor value, like a Python list or a numpy array. You can get the same error by nesting tf.constant calls, as in tf.constant(tf.constant(5.)). Removing those calls fixes the first error. It's a very poor error message, so I would encourage you to file a bug on GitHub.
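Concretely, the loop variables are already tensors by the time tf.while_loop calls the condition, so the wrapping can simply be dropped (a sketch of the corrected condition):
def Cond_f2(n, ind, N, T):
    # ind and N are converted to tensors by tf.while_loop; no tf.constant needed
    return tf.less(tf.cast(ind, tf.int32), tf.cast(N, tf.int32))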
It also looks like the arguments to random_normal are a bit mixed up; keyword arguments are good for avoiding issues like that:
tf.random_normal(mean=0.0, stddev=1.0, shape=(n, n))
Finally, scatter_update expects a variable. It looks like a TensorArray may be what you're looking for here (or one of the higher-level looping constructs which use a TensorArray implicitly).
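For illustration, a minimal sketch of the loop rewritten around a TensorArray (my own version, not the exact code above; it assumes TF 1.x, where tf.trace, tf.random_normal and sessions are available):
import tensorflow as tf

N = 10
n = 4

def cond(ind, ta):
    return tf.less(ind, N)

def body(ind, ta):
    # trace of a random n x n matrix, written into slot `ind` of the TensorArray
    a = tf.trace(tf.random_normal(shape=(n, n), mean=0.0, stddev=1.0))
    return ind + 1, ta.write(ind, tf.cast(a, tf.float64))

ta = tf.TensorArray(dtype=tf.float64, size=N)
_, ta_final = tf.while_loop(cond, body, [tf.constant(0), ta], back_prop=False)
traces = ta_final.stack()

with tf.Session() as sess:
    print(sess.run(traces))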