I have implemented a neural network for MNIST using TensorFlow and I want to show the results on TensorBoard. Following are screenshots of the TensorBoard that I have set up. But the IMAGES page shows "No image data was found".
What information should be shown here? Should I just ignore it?
CODE
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.reset_default_graph()
mnist = input_data.read_data_sets('data', one_hot=True)

batch_size = 100
learning_rate = 0.5
training_epochs = 5
logs_path = "C:/tmp/mlp"

with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input")
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input")

with tf.name_scope("weights"):
    W = tf.Variable(tf.zeros([784, 10]))

with tf.name_scope("biases"):
    b = tf.Variable(tf.zeros([10]))

with tf.name_scope("softmax"):
    y = tf.nn.softmax(tf.matmul(x, W) + b)

with tf.name_scope('cross_entropy'):
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

with tf.name_scope('train'):
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

with tf.name_scope('Accuracy'):
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter("C:/tmp/mlp", sess.graph)
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples / batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_op, summary_op], feed_dict={x: batch_x, y_: batch_y})
            summary_writer.add_summary(summary, epoch * batch_count + i)
        if epoch % 5 == 0:
            print("Epoch: ", epoch)
    print("Accuracy: ", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    print("done")
The only lines in your code that refer to a summary operation are:
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
These lines create two scalar summaries (and add them to a default collection that contains every defined summary).
You're not defining any image summary (with tf.summary.image), so that tab in TensorBoard will be empty.
You can just ignore it: because you didn't save any tf.summary.image summary, TensorBoard has nothing to show in that tab.
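If you actually want something to appear on that tab, a minimal sketch (reusing your existing x placeholder, and assuming it is added before the tf.summary.merge_all() call) is to reshape the flattened inputs back into 28x28 images and log them with tf.summary.image:
with tf.name_scope('input_images'):
    # reshape the flattened 784-pixel vectors back into 28x28 grayscale images
    image_input = tf.reshape(x, [-1, 28, 28, 1])
    # log at most 10 images per summary step; merge_all() will pick this summary up too
    tf.summary.image('input', image_input, max_outputs=10)
After rerunning training, the logged batches of input digits should show up under the IMAGES tab.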
Related
I have the tf.event files present in a folder and I run the command to view them, but I am still not able to see the graph.
Please find the code attached; the part related to the graph is included.
I am using TensorFlow 1.8; upgrading caused a lot of issues, so I am staying on the lower version.
# Initialize the FileWriter
with tf.Session() as sess:
    writer = tf.summary.FileWriter("./Training_FileWriter/", sess.graph)
    writer1 = tf.summary.FileWriter("./Validation_FileWriter/", sess.graph)

    # Add the cost and accuracy to summary
    tf.summary.scalar('loss', tf.squeeze(cross_entropy))
    tf.summary.scalar('accuracy', tf.squeeze(accuracy))

    # Merge all summaries together
    merged_summary = tf.summary.merge_all()
    #
    #
    # After executing loss, optimizer, accuracy
    summ = sess.run(merged_summary, feed_dict=feed_dict_train)
    writer.add_summary(summ, epoch * int(len(trainLabels) / batch_size) + batch)
Will it help if you have a full-fledged example like this? I am able to view the graphs.
tensorboard --logdir=D:\Development_Avecto\TensorFlow\logs\1\train
TensorBoard 1.9.0 at http://LT032871:6006 (Press CTRL+C to quit)
import tensorflow as tf

# reset everything to rerun in jupyter
tf.reset_default_graph()

# config
batch_size = 100
learning_rate = 0.5
training_epochs = 5
logs_path = "D:/Development_Avecto/TensorFlow/logs/1/train"

# load mnist data set
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# input images
with tf.name_scope('input'):
    # None -> batch size can be any size, 784 -> flattened mnist image
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input")
    # target 10 output classes
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input")

# model parameters will change during training so we use tf.Variable
with tf.name_scope("weights"):
    W = tf.Variable(tf.zeros([784, 10]))

# bias
with tf.name_scope("biases"):
    b = tf.Variable(tf.zeros([10]))

# implement model
with tf.name_scope("softmax"):
    # y is our prediction
    y = tf.nn.softmax(tf.matmul(x, W) + b)

# specify cost function
with tf.name_scope('cross_entropy'):
    # this is our cost
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

# specify optimizer
with tf.name_scope('train'):
    # optimizer is an "operation" which we can execute in a session
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

with tf.name_scope('Accuracy'):
    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# create a summary for our cost and accuracy
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)

# merge all summaries into a single "operation" which we can execute in a session
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    # variables need to be initialized before we can use them
    sess.run(tf.initialize_all_variables())

    # create log writer object
    writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

    # perform training cycles
    for epoch in range(training_epochs):
        # number of batches in one epoch
        batch_count = int(mnist.train.num_examples / batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # perform the operations we defined earlier on batch
            _, summary = sess.run([train_op, summary_op], feed_dict={x: batch_x, y_: batch_y})
            # write log
            writer.add_summary(summary, epoch * batch_count + i)
        if epoch % 5 == 0:
            print("Epoch: ", epoch)
    print("Accuracy: ", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    print("done")
I've trained a TF Random Forest Classifier similar to the following code:
X = tf.placeholder(tf.float32, shape=[None, num_features])
Y = tf.placeholder(tf.int32, shape=[None])

hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees).fill()

forest_graph = tensor_forest.RandomForestGraphs(hparams)

train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)

infer_op, _, _ = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init_vars = tf.group(tf.global_variables_initializer(),
                     resources.initialize_resources(resources.shared_resources()))

with tf.Session() as sess:
    sess.run(init_vars)
    saver = tf.train.Saver()

    for i in range(1, 100):
        for batch_x, batch_y in render_batch(batch_size):
            _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
        acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
        print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))
        if acc >= 0.87:
            print("Stopping and saving")
            save_path = saver.save(sess, model_path)
            print("Model saved in file: %s" % save_path)
            break
Now I want to reload my model and use it to make predictions on unseen data like this:
with graph.as_default():
    session_conf = tf.ConfigProto()
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        saver = tf.train.import_meta_graph("{}.meta".format(model_path))
        saver.restore(sess, checkpoint_file)
        accuracy_op = graph.get_operation_by_name("accuracy_op").outputs[0]
        print(sess.run(accuracy_op, feed_dict={X: x_test, Y: y_test}))
However, I get the following error message:
KeyError: "The name 'accuracy_op' refers to an Operation not in the graph."
My question is - how can I save my model such that when I reload it, I can import those operations defined above and use them on unseen data?
Thanks!
Since you are using get_operation_by_name, you should have named the op accuracy_op. You can do it by using tf.identity:
accuracy_op = tf.identity(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)), 'accuracy_op')
I also see that you are using the tensors X and Y without loading them from the restored graph. So name the tensors in the original code as well, and then reload them with get_tensor_by_name().
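A minimal sketch of that idea, reusing the names from your snippet (the string names "X", "Y", and "accuracy_op" are just illustrative choices):
# when building the training graph, give the tensors explicit names
X = tf.placeholder(tf.float32, shape=[None, num_features], name="X")
Y = tf.placeholder(tf.int32, shape=[None], name="Y")
accuracy_op = tf.identity(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)), name="accuracy_op")

# after import_meta_graph/restore, fetch the same tensors back by name
X = graph.get_tensor_by_name("X:0")
Y = graph.get_tensor_by_name("Y:0")
accuracy_op = graph.get_tensor_by_name("accuracy_op:0")
print(sess.run(accuracy_op, feed_dict={X: x_test, Y: y_test}))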
I am running the multi-GPU CIFAR10 example. I observed that as I increase the number of GPUs in the example, the time taken to train increases.
The nvidia-smi -l 1 command shows the expected utilization and behavior of the GPUs, but training the model takes longer with more GPUs, which is unexpected.
I don't know if I am missing any configuration settings before running the example.
I also tried to run MNIST on multiple GPUs and faced a similar problem. Basically, I was trying to collect some statistics for multiple GPUs.
As I increase the number of GPUs via the value in for i in xrange(num_gpus):, I see more time being taken. Is anything wrong with the code?
start_time = time.time()

def train():
    with tf.device('/cpu:0'):
        x = tf.placeholder(tf.float32, [None, 784])
        W = tf.Variable(tf.zeros([784, 10]))
        b = tf.Variable(tf.zeros([10]))
        # y = tf.nn.softmax(tf.matmul(x, W) + b)
        y_ = tf.placeholder(tf.float32, [None, 10])
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        op = tf.train.GradientDescentOptimizer(0.5)

        for i in xrange(4):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
                    # batch_xs, batch_ys = mnist.train.next_batch(100)
                    # batch_xs, batch_ys = queue.dequeue_many(100)
                    y = tf.nn.softmax(tf.matmul(x, W) + b)
                    # print(batch_xs)
                    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
                    tower_gradient = op.compute_gradients(cross_entropy)
                    tower_grads.append(tower_gradient)

        grads = average_gradients(tower_grads)
        apply_gradient_op = op.apply_gradients(grads, global_step=global_step)

        sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
        # coord = tf.train.Coordinator()
        # enqueue_threads = qr.create_threads(sess, coord=coord, start=True)
        tf.global_variables_initializer().run()

        for _ in range(1000):
            data_batch, label_batch = mnist.train.next_batch(100)
            # data_batch, label_batch = sess.run([batch_xs, batch_ys])
            sess.run(apply_gradient_op, feed_dict={x: data_batch, y_: label_batch})

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
        # coord.request_stop()
        # coord.join(enqueue_threads)
        sess.close()

train()
print("--- %s seconds ---" % (time.time() - start_time))
Thanks & Regards
I want to test a TensorFlow classifier with several optimizers. With this code:
optimizers = [
    tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdadeltaOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.FtrlOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalGradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalAdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.RMSPropOptimizer(learning_rate).minimize(cross_entropy)]

for optimizer in optimizers:
    print(optimizer)
I got this error:
TypeError: __init__() missing 1 required positional argument: 'name'
Any help, please.
Following the MNIST tutorial on tensorflow.org and combining it with your array of optimizers, I can obtain all the accuracy rates. The error message you get seems to come from a different place.
Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

learning_rate = 0.5
optimizers = [
    tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdadeltaOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.FtrlOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalGradientDescentOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.ProximalAdagradOptimizer(learning_rate).minimize(cross_entropy),
    tf.train.RMSPropOptimizer(learning_rate).minimize(cross_entropy)]

for optimizer in optimizers:
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(optimizer, feed_dict={x: batch_xs, y_: batch_ys})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Output:
0.9157
0.8832
0.9169
0.098
0.917
0.9149
0.917
0.098
I recently became interested in deep learning. I copied the TensorFlow beginner tutorial, but I get a syntax error and can't run the script.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

# TRAINING
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y).reduction_indices = [1]
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# (SYNTAX ERROR IN "P" OF TRAIN_STEP)

sess = tf.InteractiveSession()
tf.global_variable_initializer().run()

for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# EVALUATION
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Why does this happen, and how do I fix it so I can continue with my learning?
Take a look at this line:
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y).reduction_indices = [1]
You should notice that the brackets don't close; this is not a valid statement.
Can you try to recopy the code you found online and carefully check what you copied?
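For reference, the corresponding line in the tutorial (and in the earlier examples above) passes reduction_indices as an argument to tf.reduce_sum and closes every bracket:
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))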
Good luck!