Using a saved model to make predictions in TensorFlow

I have this code to train and restore a model in TensorFlow, but how can I make predictions?
def train_neural_network(x):
    prediction = neural_network_model(x)
    # average the per-example cross-entropy so the cost is a scalar
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            t = len(train_x)
            f = t % batch_size  # leftover examples that don't fill a whole batch
            while i < (t - f):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch =', epoch + 1, '/', hm_epochs, 'loss:', epoch_loss)

        save_path = saver.save(sess, "sesionestensorflow/model1.ckpt")
        print("Model saved in path: %s" % save_path)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_x, y: test_y}))
I saw this answer, but I can't make the prediction work:
Using saved model for prediction in tensorflow

You just have to create an empty graph, define the network, load the saved weights and then run inference.
prediction = tf.argmax(neural_network_model(x), 1)
saver = tf.train.Saver()

with tf.Session() as sess:
    # load the trained weights into the model
    saver.restore(sess, "sesionestensorflow/model1.ckpt")
    # just use the model
    out = sess.run(prediction, feed_dict={x: <your input>})
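A slightly fuller sketch of the same idea, assuming x is the same placeholder and neural_network_model the same function used during training (n_features and the use of test_x as the new input are illustrative, not part of the original answer):

import numpy as np
import tensorflow as tf

tf.reset_default_graph()                              # start from an empty graph
x = tf.placeholder('float', [None, n_features])       # must match the training placeholder
prediction = tf.argmax(neural_network_model(x), 1)    # rebuild exactly the same network
saver = tf.train.Saver()

with tf.Session() as sess:
    # no global_variables_initializer() here: restore() loads the trained values
    saver.restore(sess, "sesionestensorflow/model1.ckpt")
    # run only the prediction op; no labels and no optimizer are needed
    predicted_classes = sess.run(prediction, feed_dict={x: np.array(test_x)})
    print(predicted_classes)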

Related

tensorflow 1.x: why save sess.run(optimizer) as a variable?

I am learning TensorFlow from a DataCamp tutorial; at the final training phase I have the code below. My question is about the line
opt = sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
Why are we saving sess.run(optimizer, ...) as a variable? The variable opt isn't used again anywhere... Some explanation as to how TensorFlow's optimizer works under the hood would be awesome, thanks!
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
training_iters = 20
saver = tf.train.Saver()
# tf.reset_default_graph()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_loss = []
    test_loss = []
    train_accuracy = []
    test_accuracy = []
    summary_writer = tf.summary.FileWriter('./Output', sess.graph)
    for i in range(training_iters):
        for batch in range(len(train_X)//batch_size):
            batch_x = train_X[batch*batch_size:min((batch+1)*batch_size, len(train_X))]
            batch_y = train_y[batch*batch_size:min((batch+1)*batch_size, len(train_y))]
            opt = sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            # print(type(opt))  # class 'NoneType'
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
        print("Iter " + str(i) + \
              ", Loss= " + "{:.6f}".format(loss) + \
              ", Training Accuracy= " + "{:.5f}".format(acc))  # prints the loss and training accuracy after each epoch
        print("Optimization Finished!")
        # Calculate accuracy for all 10000 mnist test images
        test_acc, valid_loss = sess.run([accuracy, cost], feed_dict={x: val_X, y: val_y})
        train_loss.append(loss)
        test_loss.append(valid_loss)
        train_accuracy.append(acc)
        test_accuracy.append(test_acc)
        print("Testing Accuracy:", "{:.5f}".format(test_acc))
    saved_path = saver.save(sess, './my-model', global_step=training_iters)
    summary_writer.close()
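For what it's worth, the commented-out print(type(opt)) in the question already hints at the answer: minimize() returns a tf.Operation, and running an Operation with sess.run() applies the weight update as a side effect and returns None, so assigning the result to opt does nothing useful. A minimal sketch, reusing the names from the snippet above:

# the train step is an Operation, not a tensor that holds a value
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # running the op updates the weights as a side effect and returns None
    result = sess.run(train_step, feed_dict={x: batch_x, y: batch_y})
    print(result)  # -> None
    # so the assignment is optional; this is equivalent and more common:
    sess.run(train_step, feed_dict={x: batch_x, y: batch_y})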

Training/Test data in tensorflow example

I'm new to TensorFlow and I am following the example here, but I have one question.
The code is as follows:
import numpy as np
import tensorflow as tf
from time import time
import math

from include.data import get_data_set
from include.model import model, lr

train_x, train_y = get_data_set("train")
test_x, test_y = get_data_set("test")
x, y, output, y_pred_cls, global_step, learning_rate = model()
global_accuracy = 0

# PARAMS
_BATCH_SIZE = 128
_EPOCH = 60
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"

# LOSS AND OPTIMIZER
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(loss, global_step=global_step)

# PREDICTION AND ACCURACY CALCULATION
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# SAVER
merged = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)

try:
    print("\nTrying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except ValueError:
    print("\nFailed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())


def train(epoch):
    batch_size = int(math.ceil(len(train_x) / _BATCH_SIZE))
    i_global = 0

    for s in range(batch_size):
        batch_xs = train_x[s*_BATCH_SIZE: (s+1)*_BATCH_SIZE]
        batch_ys = train_y[s*_BATCH_SIZE: (s+1)*_BATCH_SIZE]

        start_time = time()
        i_global, _, batch_loss, batch_acc = sess.run(
            [global_step, optimizer, loss, accuracy],
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        duration = time() - start_time

        if s % 10 == 0:
            percentage = int(round((s/batch_size)*100))
            bar_len = 29
            filled_len = int((bar_len*int(percentage))/100)
            bar = '=' * filled_len + '>' + '-' * (bar_len - filled_len)
            msg = "Global step: {:>5} - [{}] {:>3}% - acc: {:.4f} - loss: {:.4f} - {:.1f} sample/sec"
            print(msg.format(i_global, bar, percentage, batch_acc, batch_loss, _BATCH_SIZE / duration))

    test_and_save(i_global, epoch)


def test_and_save(_global_step, epoch):
    global global_accuracy

    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=np.int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(
            y_pred_cls,
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)}
        )
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean()*100
    correct_numbers = correct.sum()

    mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{})"
    print(mes.format((epoch+1), acc, correct_numbers, len(test_x)))

    if global_accuracy != 0 and global_accuracy < acc:
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
        ])
        train_writer.add_summary(summary, _global_step)
        saver.save(sess, save_path=_SAVE_PATH, global_step=_global_step)
        mes = "This epoch receive better accuracy: {:.2f} > {:.2f}. Saving session..."
        print(mes.format(acc, global_accuracy))
        global_accuracy = acc
    elif global_accuracy == 0:
        global_accuracy = acc

    print("###########################################################################################################")


def main():
    for i in range(_EPOCH):
        print("\nEpoch: {0}/{1}\n".format((i+1), _EPOCH))
        train(i)


if __name__ == "__main__":
    main()

sess.close()
In this example, I think, both the test and the training data are fed to the network, but normally only the training data should feed the network. I cannot see any difference between the train() and test_and_save() functions. Am I wrong? Thanks.
Here is an explanation, if I understood your question correctly. The train function is called every epoch and iterates through the training data. At the end of the epoch the test_and_save function is called, where the model accuracy is evaluated: it iterates through the test data with the learned weights, calculates the accuracy and saves the model. This is repeated _EPOCH times.
Edit: The model is saved in the test_and_save function. However, the weights are only updated (gradients computed and applied) when the optimizer is passed to sess.run() in the train function. In the test_and_save function the test data is fed to the network, but only y_pred_cls is evaluated by passing it to sess.run(), so no training happens on the test data.
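To make the difference concrete, here is a minimal sketch using the names from the code above (batch_xs, batch_ys and lr(epoch) stand in for whatever batch and learning rate the real loops use):

# training step: optimizer is in the fetch list, so gradients are computed
# and the weights are updated
_, batch_loss, batch_acc = sess.run(
    [optimizer, loss, accuracy],
    feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})

# evaluation step: only y_pred_cls is fetched, so the forward pass runs
# but no gradients are computed and no weights change
predictions = sess.run(y_pred_cls, feed_dict={x: batch_xs, y: batch_ys})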

Loading operations from saved TensorFlow RandomForest Classifier

I've trained a TF Random Forest Classifier similar to the following code:
X = tf.placeholder(tf.float32, shape=[None, num_features])
Y = tf.placeholder(tf.int32, shape=[None])

hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees).fill()

forest_graph = tensor_forest.RandomForestGraphs(hparams)
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)
infer_op, _, _ = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init_vars = tf.group(tf.global_variables_initializer(),
                     resources.initialize_resources(resources.shared_resources()))

with tf.Session() as sess:
    sess.run(init_vars)
    saver = tf.train.Saver()
    for i in range(1, 100):
        for batch_x, batch_y in render_batch(batch_size):
            _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
            acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
            print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))
            if acc >= 0.87:
                print("Stopping and saving")
                save_path = saver.save(sess, model_path)
                print("Model saved in file: %s" % save_path)
                break
Now I want to reload my model and use it to make predictions on unseen data like this:
with graph.as_default():
    session_conf = tf.ConfigProto()
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        saver = tf.train.import_meta_graph("{}.meta".format(model_path))
        saver.restore(sess, checkpoint_file)
        accuracy_op = graph.get_operation_by_name("accuracy_op").outputs[0]
        print(sess.run(accuracy_op, feed_dict={X: x_test, Y: y_test}))
However, I get the following error message:
KeyError: "The name 'accuracy_op' refers to an Operation not in the graph."
My question is - how can I save my model such that when I reload it, I can import those operations defined above and use them on unseen data?
Thanks!
Since you are using get_operation_by_name, you should have named the op accuracy_op. You can do it by using tf.identity:
accuracy_op = tf.identity(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)), 'accuracy_op')
I also see that you are using the tensors X and Y without loading them from the new graph. So name the tensors in the original code as well and then reload them using get_tensor_by_name().
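For example, a minimal sketch under the assumption that you add explicit names when building the training graph (the names "X", "Y" and "accuracy_op" below are illustrative):

# training code: name the placeholders and the accuracy op
X = tf.placeholder(tf.float32, shape=[None, num_features], name="X")
Y = tf.placeholder(tf.int32, shape=[None], name="Y")
accuracy_op = tf.identity(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)), name="accuracy_op")

# prediction code: fetch everything back from the restored graph by name
saver = tf.train.import_meta_graph("{}.meta".format(model_path))
saver.restore(sess, checkpoint_file)
graph = tf.get_default_graph()
X = graph.get_tensor_by_name("X:0")
Y = graph.get_tensor_by_name("Y:0")
accuracy_op = graph.get_tensor_by_name("accuracy_op:0")
print(sess.run(accuracy_op, feed_dict={X: x_test, Y: y_test}))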

tensorflow saved model loses variables

It is a neural network with 12 fully connected layers and 2 CNN layers. I have samples to train it and want to save the model during the training process. But when I want to use the saved model to predict on my new samples, I find that the model didn't save all of the variables.
Here is part of my model-saving code.
W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3, n_neurons_4]))
bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))
hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3, W_hidden_4), bias_hidden_4))

# Output layer: Variables for output weights and biases
W_out = tf.Variable(weight_initializer([n_neurons_6, n_rot]), name="W_out")
bias_out = tf.Variable(bias_initializer([n_rot]), name="bias_out")
out = tf.add(tf.matmul(hiddens['hidden_14'], W_out), bias_out, name="out")

# Cost function
# tf.reduce_mean: count the average value
mse = tf.reduce_mean(tf.squared_difference(out, Y))
opt = tf.train.AdamOptimizer().minimize(mse)

# Run initializer
net.run(tf.global_variables_initializer())

for e in range(epochs):
    print(str(e) + ":")
    # Shuffle training data
    shuffle_indices = np.random.permutation(np.arange(len(y_train)))
    X_train = X_train[shuffle_indices]
    y_train = y_train[shuffle_indices]

    # Minibatch training
    for i in range(0, len(y_train) // batch_size):
        start = i * batch_size
        batch_x = X_train[start:start + batch_size]
        batch_y = y_train[start:start + batch_size]
        # Run optimizer with batch
        net.run(opt, feed_dict={X: batch_x, Y: batch_y})

        # Show progress
        if np.mod(i, 50) == 0:
            # Prediction
            pred = net.run(out, feed_dict={X: X_test})
            mse_final = net.run(mse, feed_dict={X: batch_x, Y: batch_y})
            print(mse_final)

    if e % 50 == 0:
        model_path = "/home/student/fulldata/src/fc_bigpara/model_" + str(e/50)
        save_path = saver.save(net, model_path)
And the following is my restore code:
X_test = np.loadtxt('test_x.txt')
sess = tf.Session()

# First let's load the meta graph and restore the weights
saver = tf.train.import_meta_graph('/home/student/fulldata/src/old_model/fc_bigpara_14/model_19.0.meta')
#all_vars = tf.trainable_variables()
#for v in all_vars:
#    print(v.name)
#    print v.name, v.eval(self.sess)
saver.restore(sess, "/home/student/fulldata/src/old_model/fc_bigpara_14/model_19.0")

all_vars = tf.trainable_variables()
for v in all_vars:
    #print(v.name, v.eval(sess))
    print(v.name)
    print(v.shape)

# Now, let's access and create placeholder variables and
# create a feed-dict to feed new data
graph = tf.get_default_graph()
X = tf.placeholder(dtype=tf.float32, shape=[None, 3])
Y = tf.placeholder(dtype=tf.float32, shape=[None, 6])
out = graph.get_tensor_by_name("Variable_25:0")

#todo
with open("result.txt", "w") as f:
    #for i in range(0, len(X_test)):
    #    start = i * batch_size
    #    batch_x = X_test[start:start + batch_size]
    #    batch_x = X_test[i]
    feed_dict = {X: X_test}
    result = sess.run(out, feed_dict)
    #print(result.shape)
I can't find the parameter "out" among the model's variables, and I have added name="out" but it doesn't work. So I can't run the following line:
result = sess.run(out, feed_dict)
How can I modify my code to fix this bug?
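If it helps, here is a minimal sketch of fetching the output by name instead, under the assumption that the checkpoint was saved from the training snippet above (which creates the op with name="out"); getting the input placeholder back the same way only works if it was also created with an explicit name, hypothetically "X" here:

graph = tf.get_default_graph()
# the output op was created with name="out", so its tensor is "out:0"
out = graph.get_tensor_by_name("out:0")
# fetch the original input placeholder instead of creating a new one
# (only works if it was defined with tf.placeholder(..., name="X"))
X = graph.get_tensor_by_name("X:0")

result = sess.run(out, feed_dict={X: X_test})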

TensorBoard shows No image data was found

I have implemented a NN for MNIST using TensorFlow. I want to show the results on TensorBoard. Following are screenshots of the TensorBoard output. But the IMAGES tab shows "No image data was found".
What information should be shown here? Should I ignore it?
CODE
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.reset_default_graph()
mnist = input_data.read_data_sets('data', one_hot=True)

batch_size = 100
learning_rate = 0.5
training_epochs = 5
logs_path = "C:/tmp/mlp"

with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input")
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input")
with tf.name_scope("weights"):
    W = tf.Variable(tf.zeros([784, 10]))
with tf.name_scope("biases"):
    b = tf.Variable(tf.zeros([10]))
with tf.name_scope("softmax"):
    y = tf.nn.softmax(tf.matmul(x, W) + b)
with tf.name_scope('cross_entropy'):
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
with tf.name_scope('train'):
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
with tf.name_scope('Accuracy'):
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter("C:/tmp/mlp", sess.graph)
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples / batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_op, summary_op], feed_dict={x: batch_x, y_: batch_y})
            summary_writer.add_summary(summary, epoch * batch_count + i)
        if epoch % 5 == 0:
            print("Epoch: ", epoch)
    print("Accuracy: ", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    print("done")
The only lines in your code that refer to a summary operation are:
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
These lines create 2 scalar summaries (and add the created summary to a default collection that contains every defined summary).
You're not defining any image summary (with tf.summary.image), thus that tab in TensorBoard will be empty.
Just ignore it. Because you did not save any tf.summary.image summary, TensorBoard won't show anything in this tab.
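If you actually wanted something to show up in the IMAGES tab, a minimal sketch (reshaping the flat 784-pixel MNIST input back into 28x28 images; the scope and summary names are illustrative):

with tf.name_scope('input_images'):
    # reshape the flat [batch, 784] input back into [batch, 28, 28, 1] images
    images = tf.reshape(x, [-1, 28, 28, 1])
    # log up to 10 example images each time the merged summary is evaluated
    tf.summary.image('mnist_input', images, max_outputs=10)
# summary_op = tf.summary.merge_all() will then also pick up this image summary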