To get the gradients of the output with respect to the input, one can use
grads = tf.gradients(model.output, model.input)
where grads =
[<tf.Tensor 'gradients_81/dense/MatMul_grad/MatMul:0' shape=(?, 18) dtype=float32>]
This is a model with 18 continuous inputs and 1 continuous output.
I assume grads is a symbolic expression and that one needs to feed it a list of 18 entries so that it returns the derivatives as floats.
I would use
Test =[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]
with tf.Session() as sess:
    alpha = sess.run(grads, feed_dict={model.input: Test})
    print(alpha)
But I get the error
FailedPreconditionError (see above for traceback): Error while reading resource variable dense_2/bias from Container: localhost. This could mean that the variable was uninitialized. Not found: Container localhost does not exist. (Could not find resource: localhost/dense_2/bias)
[[Node: dense_2/BiasAdd/ReadVariableOp = ReadVariableOp[dtype=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](dense_2/bias)]]
What is wrong?
EDIT:
This is what happened before:
def build_model():
    model = keras.Sequential([
        ...])
    optimizer = ...
    model.compile(loss='mse'...)
    return model

model = build_model()
history = model.fit(data_train, train_labels, ...)
loss, mae, mse = model.evaluate(data_eval, ...)
Progress so far:
Test = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
with tf.Session() as sess:
    tf.keras.backend.set_session(sess)
    tf.initializers.variables(model.output)
    alpha = sess.run(grads, feed_dict={model.input: Test})
is also not working, giving the error:
TypeError: Using a `tf.Tensor` as a Python `bool` is not allowed. Use `if t is not None:` instead of `if t:` to test if a tensor is defined, and use TensorFlow ops such as tf.cond to execute subgraphs conditioned on the value of a tensor.
You're trying to use an uninitialized variable. All you have to do is add
sess.run(tf.global_variables_initializer())
right after with tf.Session() as sess:
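For instance, a minimal sketch of that placement (reusing grads and model from the question; note the feed must carry a batch dimension, hence the (1, 18) array):
import numpy as np
import tensorflow as tf

Test = np.ones((1, 18), dtype=np.float32)  # batch of one example with 18 features
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize variables before reading them
    alpha = sess.run(grads, feed_dict={model.input: Test})
    print(alpha)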
Edit:
You need to register the session with Keras:
with tf.Session() as sess:
    tf.keras.backend.set_session(sess)
And use tf.initializers.variables(var_list) instead of tf.global_variables_initializer()
See https://blog.keras.io/keras-as-a-simplified-interface-to-tensorflow-tutorial.html
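A minimal sketch of both suggestions together (grads, model and Test as in the previous sketch; using model.weights as the var_list is my assumption):
with tf.Session() as sess:
    tf.keras.backend.set_session(sess)  # register the session with Keras
    # initialize only the model's own variables instead of every global variable
    sess.run(tf.initializers.variables(model.weights))
    alpha = sess.run(grads, feed_dict={model.input: Test})
    print(alpha)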
Edit:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

Test = np.ones((1, 18), dtype=np.float32)
inputs = layers.Input(shape=[18,])
layer = layers.Dense(10, activation='sigmoid')(inputs)
model = tf.keras.Model(inputs=inputs, outputs=layer)
model.compile(optimizer='adam', loss='mse')
checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath='path/weights.hdf5')
# fit needs targets for the mse loss; dummy ones match the Dense(10) output
model.fit(Test, np.ones((1, 10), dtype=np.float32), epochs=1, batch_size=1,
          callbacks=[checkpointer])
grads = tf.gradients(model.output, model.input)
with tf.Session() as sess:
    tf.keras.backend.set_session(sess)
    sess.run(tf.global_variables_initializer())
    model.load_weights('path/weights.hdf5')
    alpha = sess.run(grads, feed_dict={model.input: Test})
    print(alpha)
This produces consistent results.
Related
So here is the thing: I am trying to run inference with a model that has been frozen to a .pb (protobuf) file.
I have properly frozen the model, selecting the nodes I am interested in for inference (just the output). I am also able to select the output tensor, but when I feed the input tensors I get an error like:
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'w2' with dtype float
[[Node: w2 = Placeholder[dtype=DT_FLOAT, shape=<unknown>, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Here is a simple model that I have frozen:
import tensorflow as tf
w1 = tf.placeholder("float", name="w1")
w2 = tf.placeholder("float", name="w2")
b1 = tf.Variable(2.0, name="bias")
feed_dict = {w1: 4, w2: 8}
w3 = tf.add(w1, w2)
w4 = tf.multiply(w3, b1, name="op_to_restore")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
print(sess.run(w4, feed_dict))
# Prints 24, which is (w1 + w2) * b1
saver.save(sess, 'my_test_model/test', global_step=1000)
And here is the code I am using to do the inference (from a .pb file):
w1 = tf.placeholder("float")
w2 = tf.placeholder("float")
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    with tf.gfile.FastGFile("my_test_model/frozen_model.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    tensor = sess.graph.get_tensor_by_name('op_to_restore:0')
    # sess.run(init)
    print(tensor)
    predictions = sess.run(tensor, feed_dict={w1: 4, w2: 8})
    print(predictions)
Any help will be of great value, thanks!
Just to give a clear answer to this question:
If anyone has this issue: the fix that worked for me was replacing feed_dict={w1: 4, w2: 8} with feed_dict={'w1:0': 4, 'w2:0': 8}, since those nodes already exist in the imported graph. If you want to print the nodes of your graph, this line gets them:
[n.name for n in tf.get_default_graph().as_graph_def().node]
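Putting it together, a hedged sketch of the corrected inference code (paths and node names taken from the question; no new placeholders are created):
import tensorflow as tf

with tf.Session() as sess:
    with tf.gfile.FastGFile("my_test_model/frozen_model.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    # print node names to discover what to feed and fetch
    print([n.name for n in tf.get_default_graph().as_graph_def().node])
    tensor = sess.graph.get_tensor_by_name('op_to_restore:0')
    # feed the placeholders that already exist in the imported graph, by name
    predictions = sess.run(tensor, feed_dict={'w1:0': 4, 'w2:0': 8})
    print(predictions)  # should print 24.0, as in the training script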
I have a DNC model built in TensorFlow. After training, I want to test it against test data. I have tried everything, but TensorFlow always seems to require the training data to be fed.
with tf.Session(graph=graph) as sess:
    # initialize input output pairs
    tf.initialize_all_variables().run()
    final_i_data = X_train
    final_o_data = y_train
    # for each iteration
    for i in range(0, iterations + 1):
        # feed in each input output pair
        feed_dict = {dnc.i_data: final_i_data, dnc.o_data: final_o_data}
        # make predictions
        l, _, predictions = sess.run([loss, optimizer, output], feed_dict=feed_dict)
        if i % 100 == 0:
            print(i, l)
    for x in X_test:
        x = np.reshape(x, (1, 24))
        feed_dict = {dnc.tf_test_dataset: x}
        predictions = sess.run(test_output, feed_dict=feed_dict)
        print(predictions)
I get this error every time:
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [6,24]
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[6,24], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
In my graph I have tf_test_dataset as a placeholder of shape (1, 24), but the error requires me to feed the training-data placeholder instead. PLEASE HELP!
I faced a problem with properly restoring a saved model in TensorFlow. I created a bidirectional RNN model in TensorFlow with the following code:
batchX_placeholder = tf.placeholder(tf.float32, [None, timesteps, 1],
                                    name="batchX_placeholder")
batchY_placeholder = tf.placeholder(tf.float32, [None, num_classes],
                                    name="batchY_placeholder")
weights = tf.Variable(np.random.rand(2 * STATE_SIZE, num_classes),
                      dtype=tf.float32, name="weights")
biases = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32,
                     name="biases")
logits = BiRNN(batchX_placeholder, weights, biases)
with tf.name_scope("prediction"):
    prediction = tf.nn.softmax(logits)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=batchY_placeholder))
lr = tf.Variable(learning_rate, trainable=False, dtype=tf.float32,
                 name='lr')
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_op = optimizer.minimize(loss_op)
init_op = tf.initialize_all_variables()
saver = tf.train.Saver()
The BiRNN architecture is created with the following function:
def BiRNN(x, weights, biases):
    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)
    x = tf.unstack(x, timesteps, 1)
    # Forward and backward direction cells
    lstm_fw_cell = rnn.BasicLSTMCell(STATE_SIZE, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(STATE_SIZE, forget_bias=1.0)
    outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell,
                                                 x, dtype=tf.float32)
    # Linear activation, using the last output of the rnn inner loop
    return tf.matmul(outputs[-1], weights) + biases
Then I train the model, saving it every 200 steps:
with tf.Session() as sess:
    sess.run(init_op)
    current_step = 0
    for batch_x, batch_y in get_minibatch():
        sess.run(train_op, feed_dict={batchX_placeholder: batch_x,
                                      batchY_placeholder: batch_y})
        current_step += 1
        if current_step % 200 == 0:
            saver.save(sess, os.path.join(model_dir, "model"))
To run the saved model in inference mode, I use the saved TensorFlow graph from the "model.meta" file:
graph = tf.get_default_graph()
saver = tf.train.import_meta_graph(os.path.join(model_dir, "model.meta"))
sess = tf.Session()
saver.restore(sess, tf.train.latest_checkpoint(model_dir))
weights = graph.get_tensor_by_name("weights:0")
biases = graph.get_tensor_by_name("biases:0")
batchX_placeholder = graph.get_tensor_by_name("batchX_placeholder:0")
batchY_placeholder = graph.get_tensor_by_name("batchY_placeholder:0")
logits = BiRNN(batchX_placeholder, weights, biases)
prediction = graph.get_operation_by_name("prediction/Softmax")
argmax_pred = tf.argmax(prediction, 1)
init = tf.global_variables_initializer()
sess.run(init)
for x_seq, y_gt in get_sequence():
    _, y_pred = sess.run([prediction, argmax_pred],
                         feed_dict={batchX_placeholder: [x_seq],
                                    batchY_placeholder: [[0.0, 0.0]]})
    print("Y ground true: " + str(y_gt) + ", Y pred: " + str(y_pred[0]))
And when I run the code in inference mode, I get different results each time I launch it. It seems that the output neurons of the softmax layer are randomly bound to different output classes.
So, my question is: how can I save and then correctly restore the model in TensorFlow, so that all neurons stay properly bound to their corresponding output classes?
There is no need to call tf.global_variables_initializer(); I think that is your problem.
I removed some operations (logits, weights and biases) since you don't need them: they are already loaded, and graph.get_tensor_by_name gets them for you.
For the prediction, get the tensor instead of the operation (see this answer):
This is the code:
graph = tf.get_default_graph()
saver = tf.train.import_meta_graph(os.path.join(model_dir, "model.meta"))
sess = tf.Session()
saver.restore(sess, tf.train.latest_checkpoint(model_dir))
batchX_placeholder = graph.get_tensor_by_name("batchX_placeholder:0")
batchY_placeholder = graph.get_tensor_by_name("batchY_placeholder:0")
prediction = graph.get_tensor_by_name("prediction/Softmax:0")
argmax_pred = tf.argmax(prediction, 1)
Edit 1: I noticed that I wasn't clear about why you got different results.
And when I run the code in inference mode, I get different results
each time I launch it.
Notice that although you use the weights from the loaded model, you are creating the BiRNN again, and the BasicLSTMCell also has weights and other variables that you don't set from the loaded model; hence they need to be initialized (with new random values), resulting in an untrained model again.
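With everything taken from the restored graph, inference then runs without recreating BiRNN; a hedged usage sketch (get_sequence() as in the question):
for x_seq, y_gt in get_sequence():
    # prediction depends only on batchX_placeholder, so that is all we feed
    y_prob, y_pred = sess.run([prediction, argmax_pred],
                              feed_dict={batchX_placeholder: [x_seq]})
    print("Y ground truth: " + str(y_gt) + ", Y pred: " + str(y_pred[0]))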
I am rather new to TensorFlow and am currently experimenting with models of varying complexity. I have a problem with the save and restore functionality of the package. As far as I understand the tutorials, I should be able to restore a trained graph and run it with some new input at some later point. However, I get the following error when I try to do just that:
InvalidArgumentError (see above for traceback): Shape [-1,10] has negative dimensions
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[?,10], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
My understanding of the message is that the restored graph does not like one dimension to be left arbitrary, which in turn is necessary for practical cases where I don't know beforehand how large my input will be. A minimal code snippet producing the error above can be found below. I know how to restore each tensor individually, but this gets impractical pretty quickly as models grow in complexity. I am thankful for any help and apologize in case my question is stupid.
import numpy as np
import tensorflow as tf

def generate_random_input():
    alist = []
    for _ in range(10):
        alist.append(np.random.uniform(-1, 1, 100))
    return np.array(alist).T

def generate_random_target():
    return np.random.uniform(-1, 1, 100)
x = tf.placeholder('float', [None, 10])
y = tf.placeholder('float')
# the model
w1 = tf.get_variable('w1', [10, 1], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(seed=1))
b1 = tf.get_variable('b1', [1], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(seed=1))
result = tf.add(tf.matmul(x, w1), b1, name='result')
loss = tf.reduce_mean(tf.losses.mean_squared_error(predictions=result, labels=y))
optimizer = tf.train.AdamOptimizer(0.03).minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run([optimizer, loss], feed_dict={x: generate_random_input(), y: generate_random_target()})
    saver.save(sess, 'file_name')
# now load the model in another session:
sess2 = tf.Session()
saver = tf.train.import_meta_graph('file_name.meta')
saver.restore(sess2, tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
pred = graph.get_operation_by_name('result')
test_result = sess2.run(pred, feed_dict={x: generate_random_input()})
In the last line, you don't feed the label placeholder with data in feed_dict, so its [-1] dimension stays -1 instead of being resolved to the batch size. That's the cause.
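One hedged sketch of a way around reusing Python objects from the first graph: import the meta graph into a fresh graph and look both tensors up by name (the default name 'Placeholder:0' for x is my assumption, since x was created without an explicit name):
tf.reset_default_graph()  # start from a clean graph so the saved names are unambiguous
sess2 = tf.Session()
saver = tf.train.import_meta_graph('file_name.meta')
saver.restore(sess2, tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
x_restored = graph.get_tensor_by_name('Placeholder:0')  # x had no explicit name
result = graph.get_tensor_by_name('result:0')           # fetch the tensor, not the operation
test_result = sess2.run(result, feed_dict={x_restored: generate_random_input()})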
I'm having the exact same problem as you. I'm importing and testing a bunch of different CNNs with different layer sizes and testing on various datasets. You can stick your model creation in a function like so and recreate it in your other code:
def create_model():
    x = tf.placeholder('float', [None, 10])
    y = tf.placeholder('float')
    w1 = tf.get_variable('w1', [10, 1], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable('b1', [1], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(seed=1))
    result = tf.add(tf.matmul(x, w1), b1, name='result')
    return x, y, result
x, y, result = create_model()
loss = tf.reduce_mean(tf.losses.mean_squared_error(predictions=result, labels=y))
optimizer = tf.train.AdamOptimizer(0.03).minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run([optimizer, loss], feed_dict={x: generate_random_input(), y: generate_random_target()})
    saver.save(sess, 'file_name')
# now load the model in another session:
sess2 = tf.Session()
# This stuff is optional if everything is in the same scope
x, y, result = create_model()
saver = tf.train.Saver()
# loss = ... if you want loss
# Now just restore the weights and run
saver.restore(sess2, 'file_name')
test_result = sess2.run(result, feed_dict={x: generate_random_input()})
This is a bit tedious if I want to import many complex architectures with different dimensions. For our situation, I don't know of any other way to restore an entire model than to recreate its architecture first in the second session.
I have seen a few posts on restoring TF models and the Google doc page on exporting graphs but I think I am missing something.
I use the code in this Gist to save the model, along with this utils file which defines the model.
Now I would like to restore it and run it on previously unseen test data, as follows:
def evaluate(X_data, y_data):
    num_examples = len(X_data)
    total_accuracy = 0
    total_loss = 0
    sess = tf.get_default_session()
    acc_steps = len(X_data) // BATCH_SIZE
    for i in range(acc_steps):
        batch_x, batch_y = next_batch(X_val, Y_val, BATCH_SIZE)
        loss, accuracy = sess.run([loss_value, acc], feed_dict={
            images_placeholder: batch_x,
            labels_placeholder: batch_y,
            keep_prob: 0.5
        })
        total_accuracy += (accuracy * len(batch_x))
        total_loss += (loss * len(batch_x))
    return (total_accuracy / num_examples, total_loss / num_examples)
## re-execute the code that defines the model
# Image Tensor
images_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
gray = tf.image.rgb_to_grayscale(images_placeholder, name='gray')
gray /= 255.
# Label Tensor
labels_placeholder = tf.placeholder(tf.float32, shape=(None, 43), name='y')
# dropout Tensor
keep_prob = tf.placeholder(tf.float32, name='drop')
# construct model
logits = inference(gray, keep_prob)
# calculate loss
loss_value = loss(logits, labels_placeholder)
# training
train_op = training(loss_value, 0.001)
# accuracy
acc = accuracy(logits, labels_placeholder)
with tf.Session() as sess:
    loader = tf.train.import_meta_graph('gtsd.meta')
    loader.restore(sess, tf.train.latest_checkpoint('./'))
    sess.run(tf.initialize_all_variables())
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy[0]))
I'm getting a test accuracy of only 3%. However, if I don't close the notebook and run the test code immediately after training the model, I get 95% accuracy.
This leads me to believe I'm not loading the model correctly?
The problem arises from these two lines:
loader.restore(sess, tf.train.latest_checkpoint('./'))
sess.run(tf.initialize_all_variables())
The first line loads the saved model from a checkpoint. The second line re-initializes all of the variables in the model (such as the weight matrices, convolutional filters, and bias vectors), usually to random numbers, and overwrites the loaded values.
The solution is simple: delete the second line (sess.run(tf.initialize_all_variables())) and evaluation will proceed with the trained values loaded from the checkpoint.
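The corrected block then reads (a sketch reusing the code from the question):
with tf.Session() as sess:
    loader = tf.train.import_meta_graph('gtsd.meta')
    loader.restore(sess, tf.train.latest_checkpoint('./'))
    # no re-initialization here: it would overwrite the restored weights
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy[0]))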
PS. There is a small chance that this change will give you an error about "uninitialized variables". In that case, you should execute sess.run(tf.initialize_all_variables()) to initialize any variables not saved in the checkpoint before executing loader.restore(sess, tf.train.latest_checkpoint('./')).
I had a similar problem and for me this worked:
with tf.Session() as sess:
    saver = tf.train.Saver(tf.all_variables())
    saver = tf.train.import_meta_graph('model.meta')
    saver.restore(sess, "model")
    test_accuracy = evaluate(X_test, y_test)
The answer found here is what ended up working, as follows:
save_path = saver.save(sess, '/home/ubuntu/gtsd-12-23-16.chkpt')
print("Model saved in file: %s" % save_path)
## later re-run code that creates the model
# Image Tensor
images_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
gray = tf.image.rgb_to_grayscale(images_placeholder, name='gray')
gray /= 255.
# Label Tensor
labels_placeholder = tf.placeholder(tf.float32, shape=(None, 43), name='y')
# dropout Tensor
keep_prob = tf.placeholder(tf.float32, name='drop')
# construct model
logits = inference(gray, keep_prob)
# calculate loss
loss_value = loss(logits, labels_placeholder)
# training
train_op = training(loss_value, 0.001)
# accuracy
acc = accuracy(logits, labels_placeholder)
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, '/home/ubuntu/gtsd-12-23-16.chkpt')
    print("Model restored.")
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy[0] * 100))