Tensorflow Summary Scalar is not being displayed - tensorflow

I am using the following code to produce scalar graphs for my accuracy and cost, but the scalar summaries are not being displayed in TensorBoard; it gives me the error "No scalar data was found". Can someone have a look please? Code for the model:
def train_neural_network(x):
    prediction = convolutional_neural_network(x)
    merged_summary_op = tf.summary.merge_all()

    with tf.name_scope("cost"):
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)
        tf.summary.scalar("cost", cost)

    hm_epochs = 10
    with tf.Session() as sess:
        writer = tf.summary.FileWriter('C:/Thesis/Conv3d/69', sess.graph)
        sess.run(tf.initialize_all_variables())

        successful_runs = 0
        total_runs = 0
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for data in train_data:
                total_runs += 1
                try:
                    X = data[0]
                    Y = data[1]
                    _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
                    writer.add_summary(summary, global_step=epoch)
                    epoch_loss += c
                    successful_runs += 1
                except Exception as e:
                    pass
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)

            with tf.name_scope("accuracy"):
                correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
                tf.summary.scalar("accuracy", accuracy)
            print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))

        print('Done. Finishing accuracy:')
        print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))
        print('fitment percent:', successful_runs / total_runs)

You need to call merge_all after defining the summary ops. What happens right now is that the merged op simply doesn't have anything to summarize (since it's created first) and unfortunately isn't "smart" enough to pick up summary ops that are defined later.
Note that I also "fixed" the code in two other ways: you generally shouldn't define TF ops inside a loop, so I moved all the accuracy ops before the training loop, and I fetch merged_summary_op in sess.run so that summary is actually defined before writer.add_summary is called.
def train_neural_network(x):
    prediction = convolutional_neural_network(x)

    with tf.name_scope("cost"):
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)
        tf.summary.scalar("cost", cost)

    with tf.name_scope("accuracy"):
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        tf.summary.scalar("accuracy", accuracy)

    # merge_all must come after every tf.summary.* op above has been created
    merged_summary_op = tf.summary.merge_all()

    hm_epochs = 10
    with tf.Session() as sess:
        writer = tf.summary.FileWriter('C:/Thesis/Conv3d/69', sess.graph)
        # deprecated; tf.global_variables_initializer() is the modern equivalent
        sess.run(tf.initialize_all_variables())

        successful_runs = 0
        total_runs = 0
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for data in train_data:
                total_runs += 1
                try:
                    X = data[0]
                    Y = data[1]
                    # evaluate the merged summary op together with the training step
                    _, c, summary = sess.run([optimizer, cost, merged_summary_op],
                                             feed_dict={x: X, y: Y})
                    writer.add_summary(summary, global_step=epoch)
                    epoch_loss += c
                    successful_runs += 1
                except Exception as e:
                    pass
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
            print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))

        print('Done. Finishing accuracy:')
        print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))
        print('fitment percent:', successful_runs / total_runs)

Related

tensorflow 1x why save session run optimizer as a variable?

I am learning TensorFlow from a DataCamp tutorial; at the final training phase I have the code below. My question is about the line
opt = sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
Why are we saving sess.run(optimizer, ...) as a variable? The variable "opt" isn't used again anywhere. Some explanation as to how TensorFlow's optimizer works under the hood would be awesome, thanks!
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

training_iters = 20
saver = tf.train.Saver()
# tf.reset_default_graph()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_loss = []
    test_loss = []
    train_accuracy = []
    test_accuracy = []
    summary_writer = tf.summary.FileWriter('./Output', sess.graph)
    for i in range(training_iters):
        for batch in range(len(train_X)//batch_size):
            batch_x = train_X[batch*batch_size:min((batch+1)*batch_size, len(train_X))]
            batch_y = train_y[batch*batch_size:min((batch+1)*batch_size, len(train_y))]
            opt = sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            # print(type(opt))  # class 'NoneType'
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
        print("Iter " + str(i) +
              ", Loss= " + "{:.6f}".format(loss) +
              ", Training Accuracy= " + "{:.5f}".format(acc))  # prints the loss and training accuracy after each epoch
        print("Optimization Finished!")

        # Calculate accuracy for all 10000 mnist test images
        test_acc, valid_loss = sess.run([accuracy, cost], feed_dict={x: val_X, y: val_y})
        train_loss.append(loss)
        test_loss.append(valid_loss)
        train_accuracy.append(acc)
        test_accuracy.append(test_acc)
        print("Testing Accuracy:", "{:.5f}".format(test_acc))
    saved_path = saver.save(sess, './my-model', global_step=training_iters)
    summary_writer.close()
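There is no need to save it: optimizer here is the tf.Operation returned by minimize(), and sess.run on an Operation returns None (the commented-out print(type(opt)) confirms this). The op is run purely for its side effect of applying the gradient update to the variables, so a minimal equivalent, assuming the same placeholders and batches, is:

# run the training op only for its side effect (the gradient update);
# there is no return value worth keeping
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})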

using saved model to make predictions in tensorflow

I have this code to train and restore a model in TensorFlow, but how can I make predictions?
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        #sess.run(tf.initialize_all_variables())
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            #while i < len(train_x):
            t = len(train_x)
            f = t % batch_size
            while i < (t - f):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                #epoch_loss = epoch_loss + c
                i += batch_size
                #i = i + batch_size
            print('Epoch =', epoch+1, '/', hm_epochs, 'loss:', epoch_loss)
        save_path = saver.save(sess, "sesionestensorflow/model1.ckpt")
        print("Model saved in path: %s" % save_path)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_x, y: test_y}))
I saw this answer but I can't make the prediction work:
Using saved model for prediction in tensorflow
You just have to create an empty graph, define the network, load the saved weights and then run inference:
prediction = tf.argmax(neural_network_model(x), 1)
saver = tf.train.Saver()

with tf.Session() as sess:
    # load the trained weights into the model
    saver.restore(sess, "sesionestensorflow/model1.ckpt")
    # just use the model
    out = sess.run(prediction, feed_dict={x: <your input>})
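For example, a hypothetical call assuming test_x from the question holds inputs already shaped like the training data:

# feed a single (hypothetical) test example and print the predicted class index
out = sess.run(prediction, feed_dict={x: np.array(test_x[:1])})
print('predicted class:', out)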

Loading operations from saved TensorFlow RandomForest Classifier

I've trained a TF Random Forest Classifier similar to the following code:
X = tf.placeholder(tf.float32, shape=[None, num_features])
Y = tf.placeholder(tf.int32, shape=[None])

hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees).fill()

forest_graph = tensor_forest.RandomForestGraphs(hparams)
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)
infer_op, _, _ = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init_vars = tf.group(tf.global_variables_initializer(),
                     resources.initialize_resources(resources.shared_resources()))

with tf.Session() as sess:
    sess.run(init_vars)
    saver = tf.train.Saver()
    for i in range(1, 100):
        for batch_x, batch_y in render_batch(batch_size):
            _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
            acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
            print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))
            if acc >= 0.87:
                print("Stopping and saving")
                save_path = saver.save(sess, model_path)
                print("Model saved in file: %s" % save_path)
                break
Now I want to reload my model and use it to make predictions on unseen data like this:
with graph.as_default():
    session_conf = tf.ConfigProto()
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        saver = tf.train.import_meta_graph("{}.meta".format(model_path))
        saver.restore(sess, checkpoint_file)
        accuracy_op = graph.get_operation_by_name("accuracy_op").outputs[0]
        print(sess.run(accuracy_op, feed_dict={X: x_test, Y: y_test}))
However, I get the following error message:
KeyError: "The name 'accuracy_op' refers to an Operation not in the graph."
My question is - how can I save my model such that when I reload it, I can import those operations defined above and use them on unseen data?
Thanks!
Since you are using get_operation_by_name, you should have named the op accuracy_op. You can do it by using tf.identity:
accuracy_op = tf.identity(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)), 'accuracy_op')
I also see that you are feeding the tensors X and Y without loading them from the new graph, so name those tensors in the original code as well, and then reload them using get_tensor_by_name().
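A minimal sketch of both suggestions together (the names here are illustrative, not required):

# name the placeholders when building the original graph
X = tf.placeholder(tf.float32, shape=[None, num_features], name="X")
Y = tf.placeholder(tf.int32, shape=[None], name="Y")
accuracy_op = tf.identity(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)), 'accuracy_op')

# after import_meta_graph/restore, fetch everything back by name
graph = tf.get_default_graph()
X = graph.get_tensor_by_name("X:0")
Y = graph.get_tensor_by_name("Y:0")
accuracy_op = graph.get_tensor_by_name("accuracy_op:0")
print(sess.run(accuracy_op, feed_dict={X: x_test, Y: y_test}))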

Tensorflow: Logistic regression cost=NaN

I am trying to implement a logistic regression model with 9 different target classes. When debugging I get
Epoch: 0025 cost= nan
This is what one batch looks like:
batch_xs
[[ 3.40000000e+01 3.34000000e+01 9.00000000e-02 3.40000000e+01
4.06858908e+00 0.00000000e+00 3.30000000e+01 4.04000000e+01
1.00000000e-02 3.30000000e+01 4.06858908e+00 1.00000000e+00
3.20000000e+01 4.22000000e+01 7.00000000e-01 3.20000000e+01
4.06858908e+00 2.00000000e+00 3.10000000e+01 4.18000000e+01
5.00000000e-01 3.10000000e+01 4.06858908e+00 3.00000000e+00
3.00000000e+01 3.70000000e+01 0.00000000e+00 3.00000000e+01
4.06858908e+00 4.00000000e+00 2.90000000e+01 3.78000000e+01
2.00000000e-02 2.90000000e+01 4.06858908e+00 5.00000000e+00
2.80000000e+01 3.78000000e+01 2.00000000e-02 2.90000000e+01
4.06858908e+00 6.00000000e+00 4.90000000e+01 4.00000000e+00
1.00000000e+00]]
batch_ys:
[[0 0 0 1 0 0 0 0 0]]
while the original y was the following; I converted it into a (_, 9) matrix using convert_y:
[[3]]
This is some of the code I use:
learning_rate = 0.01
training_epochs = 25
batch_size = 1
display_step = 1

x = tf.placeholder(tf.float32, [None, feature_column_count])
y = tf.placeholder(tf.float32, [None, n_target_classes])

W = tf.Variable(tf.zeros([feature_column_count, n_target_classes]))
b = tf.Variable(tf.zeros([n_target_classes]))

pred = tf.nn.softmax(tf.matmul(x, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

def next_batch(num, data, labels):
    idx = np.arange(0, len(data))
    idx = idx[:num]
    data_s = data[idx]
    labels_s = labels[idx]
    return np.asarray(data_s), np.asarray(labels_s)

def convert_y(size, n_classes, y):
    yk = np.zeros((size, n_classes), dtype=np.int)
    for a in range(len(y)):
        yk[a, y[a]] = 1
    return yk

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(np.shape(TRAINING_SET.data)[0]/batch_size)
        for i in range(total_batch):
            features = TRAINING_SET.data
            target = TRAINING_SET.target
            batch_xs, batch_ys = next_batch(batch_size, features, target)
            batch_ys = convert_y(len(batch_ys), n_target_classes, batch_ys)
            print(batch_xs)
            print(batch_ys)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c / total_batch
        if (epoch+1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
    print("Optimization Finished!")

    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Test Accuracy:", accuracy.eval({x: TRAINING_SET.data, y: convert_y(len(TRAINING_SET.target), n_target_classes, TRAINING_SET.target)}))
    print("Validation Accuracy:", accuracy.eval({x: VALIDATION_SET.data, y: convert_y(len(VALIDATION_SET.target), n_target_classes, VALIDATION_SET.target)}))
Does anyone know where the issue in the code is?
For regression it's better to use a mean-square loss; you may try the following. Gradient clipping would also help:
logits = tf.nn.relu(tf.matmul(x, W) + b)
cost = tf.reduce_mean(tf.square(tf.subtract(y, logits)))

learning_rate = 0.01
momentum = 0.9
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum, use_nesterov=True)

# clip each gradient to a maximum norm of 10 before applying it
grads_vars = optimizer.compute_gradients(cost)
clipped_grads_vars = []
for grad, var in grads_vars:
    grad = tf.clip_by_norm(grad, clip_norm=10.0)
    clipped_grads_vars.append((grad, var))
train_op = optimizer.apply_gradients(clipped_grads_vars)
# ...
_, c = sess.run([train_op, cost], feed_dict={x: batch_xs, y: batch_ys})
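As a side note beyond that answer: since the targets here are one-hot class labels, the NaN most likely comes from tf.log(pred) evaluating log(0). A numerically stable way to keep the cross-entropy objective is to compute it from the raw logits; a minimal sketch using the variables already defined above:

# let TensorFlow compute the log-softmax internally, which avoids log(0)
logits = tf.matmul(x, W) + b
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))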

TensorBoard shows No image data was found

I have implemented a NN for MNIST using TensorFlow and I want to show the results on TensorBoard. The IMAGES page, however, shows "No image data was found".
What information should be shown there? Should I ignore it?
CODE
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.reset_default_graph()
mnist = input_data.read_data_sets('data', one_hot=True)

batch_size = 100
learning_rate = 0.5
training_epochs = 5
logs_path = "C:/tmp/mlp"

with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input")
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input")

with tf.name_scope("weights"):
    W = tf.Variable(tf.zeros([784, 10]))

with tf.name_scope("biases"):
    b = tf.Variable(tf.zeros([10]))

with tf.name_scope("softmax"):
    y = tf.nn.softmax(tf.matmul(x, W) + b)

with tf.name_scope('cross_entropy'):
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

with tf.name_scope('train'):
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

with tf.name_scope('Accuracy'):
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter("C:/tmp/mlp", sess.graph)
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples / batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_op, summary_op], feed_dict={x: batch_x, y_: batch_y})
            summary_writer.add_summary(summary, epoch * batch_count + i)
        if epoch % 5 == 0:
            print("Epoch: ", epoch)
    print("Accuracy: ", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    print("done")
The only lines in your code that refer to a summary operation are:
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
These lines create 2 scalar summaries (and add the created summary to a default collection that contains every defined summary).
You're not defining any image summary (with tf.summary.image), thus that tab in TensorBoard will be empty.
Just ignore it. Because you didn't save any tf.summary.image summary, TensorBoard won't show anything in this tab.
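If you did want something to appear there, a minimal sketch (assuming the same x placeholder holding flattened 28x28 MNIST images) is to add an image summary anywhere before the merge_all call:

# reshape the flat 784-vectors back to 28x28x1 and log a few images per step
x_image = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image("input", x_image, max_outputs=3)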