Can I also use TensorBoard with Jupyter notebooks? - tensorflow

I am experimenting with (learning) TensorBoard and use the following code, which I got from the internet (a simple regression function):
import tensorflow as tf
import numpy as np
#sess = tf.InteractiveSession() #define a session
# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype("float32")
y_data = x_data * 0.1 + 0.3
# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but Tensorflow will
# figure that out for us.)
with tf.name_scope("calculatematmul") as scope:
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# Before starting, initialize the variables. We will 'run' this first.
init = tf.initialize_all_variables()
# Launch the graph.
sess = tf.Session()
sess.run(init)
#### ----> ADD THIS LINE <---- ####
writer = tf.train.SummaryWriter('mnist_logs', sess.graph_def)
# Fit the line.
for step in xrange(201):
sess.run(train)
if step % 20 == 0:
print(step, sess.run(W), sess.run(b))
The code runs fine when I create a Python file and run the file with
python test.py
It also runs fine in the Jupyter notebook.
However, while TensorBoard gets the information from running the Python file (that is to say, it creates the xyz....home file), the interactive version does not create any info usable for TensorBoard.
Can somebody please explain to me why?
Thanks
Peter

Be sure that you use the full path when starting tensorboard.
tensorboard --logdir='./somedirectory/mnist_logs'
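If it is unclear which directory the notebook kernel is actually writing to, here is a minimal sketch (same TF 0.x API as in the question; the absolute-path handling and explicit flush are assumptions about what goes wrong) that resolves the log directory against the notebook's working directory and flushes the writer so the events file is on disk:
import os
import tensorflow as tf

# Minimal sketch: the relative 'mnist_logs' is resolved against the notebook
# kernel's working directory, which may differ from where tensorboard is started.
logdir = os.path.join(os.getcwd(), 'mnist_logs')

sess = tf.Session()
writer = tf.train.SummaryWriter(logdir, sess.graph_def)
# ... build and run the model as in the question ...
writer.flush()   # make sure the events file is actually written out
writer.close()

print(logdir)    # pass exactly this path to `tensorboard --logdir`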

Related

TensorBoard visualization doesn't appear in Google Colab

I am implementing simple linear regression code in Google Colab and trying to visualize the results with TensorBoard using the following command:
%tensorboard --logdir=/tmp/lr-train
However, when I run this command, TensorBoard simply does not show up. Instead, I just see the following message: Reusing TensorBoard on port 6012 (pid 3219), started 0:07:06 ago. (Use '!kill 3219' to kill it.)
How do I launch TensorBoard in my case? Here is the code I am trying to run:
%tensorflow_version 1.x
import tensorflow as tf
import numpy as np
N = 100
x_zeros = np.random.multivariate_normal(
    mean=np.array((-1, -1)), cov = 0.1 * np.eye(2), size = (N//2))
y_zeros = np.zeros((N//2,))
x_ones = np.random.multivariate_normal(mean=np.array((1, 1)), cov = 0.1 * np.eye(2), size=(N//2))
y_ones = np.zeros((N//2))
x_np = np.vstack([x_zeros, x_ones])
y_np = np.concatenate([y_zeros, y_ones])
with tf.name_scope("placeholders"):
    x = tf.placeholder(tf.float32, (N, 2))
    y = tf.placeholder(tf.float32, (N, 1))
with tf.name_scope("weights"):
    W = tf.Variable(tf.random_normal((2, 1)))
    b = tf.Variable(tf.random_normal((1, )))
with tf.name_scope("prediction"):
    y_pred = tf.matmul(x, W) + b
with tf.name_scope("loss"):
    l = tf.reduce_sum((y - y_pred)**2)
with tf.name_scope("optim"):
    train_op = tf.train.AdamOptimizer(0.05).minimize(l)
with tf.name_scope("summaries"):
    tf.summary.scalar("loss", l)
    merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('/tmp/lr-train', tf.get_default_graph())
n_steps = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Train model
    for i in range(n_steps):
        feed_dict = {x: x_np, y: y_np.reshape(-1,1)}
        _, summary, loss = sess.run([train_op, merged, l], feed_dict=feed_dict)
        if i % 10 == 0:
            print("step %d, loss: %f" % (i, loss))
I tried an example in TF 2, and TensorBoard launched without any issues with the same command.
I tried your code in Colab, was able to reproduce what you mentioned, and found a solution that worked, as described below.
Use a space between --logdir and /tmp/lr-train instead of an =.
What did not work as mentioned in the question:
%load_ext tensorboard
%tensorboard --logdir=/tmp/lr-train
What did work:
%load_ext tensorboard
%tensorboard --logdir /tmp/lr-train
You can also terminate your active session and then rerun all cells afterward. But maybe you have even found a way of killing a TensorBoard within a session.
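For inspecting or stopping instances from inside the notebook, here is a small sketch, assuming the tensorboard package's notebook helpers are available in the Colab runtime (they ship with recent TensorBoard versions):
# Sketch: list running TensorBoard instances and re-attach to one by port.
from tensorboard import notebook

notebook.list()                          # shows ports/pids of running instances
notebook.display(port=6012, height=800)  # re-attach to the instance from the message
# or kill the stale process reported in the "Reusing TensorBoard" message:
# !kill 3219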

TensorFlow running animations from Jupyter/IPython

I'm going through the TensorFlow example of water droplets on water; code:
#Import libraries for simulation
import tensorflow as tf
import numpy as np
#Imports for visualization
import PIL.Image
from io import BytesIO
from IPython.display import clear_output, Image, display
#A function for displaying the state of the pond's surface as an image.
def DisplayArray(a, fmt='jpeg', rng=[0,1]):
    """Display an array as a picture."""
    a = (a - rng[0])/float(rng[1] - rng[0])*255
    a = np.uint8(np.clip(a, 0, 255))
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    clear_output(wait = True)
    display(Image(data=f.getvalue()))
sess = tf.InteractiveSession()
def make_kernel(a):
    """Transform a 2D array into a convolution kernel"""
    a = np.asarray(a)
    a = a.reshape(list(a.shape) + [1,1])
    return tf.constant(a, dtype=1)
def simple_conv(x, k):
    """A simplified 2D convolution operation"""
    x = tf.expand_dims(tf.expand_dims(x, 0), -1)
    y = tf.nn.depthwise_conv2d(x, k, [1, 1, 1, 1], padding='SAME')
    return y[0, :, :, 0]
def laplace(x):
    """Compute the 2D laplacian of an array"""
    laplace_k = make_kernel([[0.5, 1.0, 0.5],
                             [1.0, -6., 1.0],
                             [0.5, 1.0, 0.5]])
    return simple_conv(x, laplace_k)
N = 500
# Initial Conditions -- some rain drops hit a pond
# Set everything to zero
u_init = np.zeros([N, N], dtype=np.float32)
ut_init = np.zeros([N, N], dtype=np.float32)
# Some rain drops hit a pond at random points
for n in range(40):
    a,b = np.random.randint(0, N, 2)
    u_init[a,b] = np.random.uniform()
DisplayArray(u_init, rng=[-0.1, 0.1])
# Parameters:
# eps -- time resolution
# damping -- wave damping
eps = tf.placeholder(tf.float32, shape=())
damping = tf.placeholder(tf.float32, shape=())
# Create variables for simulation state
U = tf.Variable(u_init)
Ut = tf.Variable(ut_init)
# Discretized PDE update rules
U_ = U + eps * Ut
Ut_ = Ut + eps * (laplace(U) - damping * Ut)
# Operation to update the state
step = tf.group(
    U.assign(U_),
    Ut.assign(Ut_))
# Initialize state to initial conditions
tf.global_variables_initializer().run()
# Run 1000 steps of PDE
for i in range(1000):
    # Step simulation
    step.run({eps: 0.03, damping: 0.04})
    DisplayArray(U.eval(), rng=[-0.1, 0.1])
Then from IPython I import partial_d, but it doesn't generate the animation.
Does anyone who's ever used TensorFlow know how to fix this? Google mentions IPython Notebook; I couldn't find/set that up, but I do have Jupyter and the latest IPython installed.
Have you used Jupyter before? I think you need to start your notebook server and run the code from within there.
Try running jupyter notebook and then importing your code into the notebook. Alternatively, you could just copy and paste your code into a code cell and skip importing.
I'm unfamiliar with the example you are referring to, but I don't think it's a TF problem. See how you do with running it through Jupyter (the new name for IPython, to clear up any confusion).
This got me up to speed on how to use Jupyter and TensorFlow to generate the animation of ripples.
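For reference, the in-place animation only depends on IPython's display machinery, not on TensorFlow. Here is a minimal sketch you can paste into a notebook code cell to check that the redraw works (the random frames are just placeholders):
import time
import numpy as np
import PIL.Image
from io import BytesIO
from IPython.display import clear_output, Image, display

def show_frame(a):
    # Convert a float array in [0, 1] to a JPEG and redraw it in place.
    a = np.uint8(np.clip(a * 255, 0, 255))
    buf = BytesIO()
    PIL.Image.fromarray(a).save(buf, 'jpeg')
    clear_output(wait=True)
    display(Image(data=buf.getvalue()))

for _ in range(50):
    show_frame(np.random.rand(100, 100))
    time.sleep(0.05)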

Error with TensorFlow 1.0 MNIST code

I am now learning TensorFlow 1.0 with Python 3.5.2. I tried the following code found on GitHub, but I am getting the error No module named 'tensorflowvisu'. If I remove the import tensorflowvisu, I get the error: I = tensorflowvisu.tf_format_mnist_images(X, Ypred, Y_) # assembles 10x10 images by default
NameError: name 'tensorflowvisu' is not defined
What should I do to get this code to work? Does anyone have working code for MNIST with TensorFlow 1.0 and Python 3.5 that I can follow to learn from? Any response is appreciated.
https://github.com/martin-gorner/tensorflow-mnist-tutorial/blob/master/mnist_1.0_softmax.py
import tensorflow as tf
import tensorflowvisu
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
tf.set_random_seed(0)
# neural network with 1 layer of 10 softmax neurons
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28 * 28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (softmax) W [784, 10] b[10]
# · · · · · · · · Y [batch, 10]
# The model is:
#
# Y = softmax( X * W + b)
# X: matrix for 100 grayscale images of 28x28 pixels, flattened (there are 100 images in a mini-batch)
# W: weight matrix with 784 lines and 10 columns
# b: bias vector with 10 dimensions
# +: add with broadcasting: adds the vector to each line of the matrix (numpy)
# softmax(matrix) applies softmax on each line
# softmax(line) applies an exp to each value then divides by the norm of the resulting line
# Y: output matrix with 100 lines and 10 columns
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = read_data_sets("data", one_hot=True, reshape=False, validation_size=0)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# weights W[784, 10] 784=28*28
W = tf.Variable(tf.zeros([784, 10]))
# biases b[10]
b = tf.Variable(tf.zeros([10]))
# flatten the images into a single line of pixels
# -1 in the shape definition means "the only possible dimension that will preserve the number of elements"
XX = tf.reshape(X, [-1, 784])
# The model
Y = tf.nn.softmax(tf.matmul(XX, W) + b)
# loss function: cross-entropy = - sum( Y_i * log(Yi) )
# Y: the computed output vector
# Y_: the desired output vector
# cross-entropy
# log takes the log of each element, * multiplies the tensors element by element
# reduce_mean will add all the components in the tensor
# so here we end up with the total cross-entropy for all images in the batch
cross_entropy = -tf.reduce_mean(Y_ * tf.log(Y)) * 1000.0 # normalized for batches of 100 images,
# *10 because "mean" included an unwanted division by 10
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# training, learning rate = 0.005
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)
# matplotlib visualisation
allweights = tf.reshape(W, [-1])
allbiases = tf.reshape(b, [-1])
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_) # assembles 10x10 images by default
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25) # 1000 images on 25 lines
datavis = tensorflowvisu.MnistDataVis()
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
    # training on batches of 100 images with 100 labels
    batch_X, batch_Y = mnist.train.next_batch(100)
    # compute training values for visualisation
    if update_train_data:
        a, c, im, w, b = sess.run([accuracy, cross_entropy, I, allweights, allbiases], feed_dict={X: batch_X, Y_: batch_Y})
        datavis.append_training_curves_data(i, a, c)
        datavis.append_data_histograms(i, w, b)
        datavis.update_image1(im)
        print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c))
    # compute test values for visualisation
    if update_test_data:
        a, c, im = sess.run([accuracy, cross_entropy, It], feed_dict={X: mnist.test.images, Y_: mnist.test.labels})
        datavis.append_test_curves_data(i, a, c)
        datavis.update_image2(im)
        print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
    # the backpropagation training step
    sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y})
datavis.animate(training_step, iterations=2000+1, train_data_update_freq=10, test_data_update_freq=50, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(2000+1): training_step(i, i % 50 == 0, i % 10 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# final max test accuracy = 0.9268 (10K iterations). Accuracy should peak above 0.92 in the first 2000 iterations.
I had the same issue. The solution is to run the code from within the folder where all the code resides. Don't just copy the mnist_1.0_softmax.py code to your IDE and run it. Download or clone the entire repo from the link below:
https://github.com/martin-gorner/tensorflow-mnist-tutorial.git
Once cloned, you will see that in that folder there is a file called tensorflowvisu.py. So this is not a module that you install from conda or pip; it's just a file that the author uses as a module in this specific case. Go to the directory where all this code sits via the command line and from there run
python mnist_1.0_softmax.py
Now it should work. You should see a pop-up window with 6 charts that get updated in real time.
If you want to run it from your IDE, then open your IDE (in my case Atom), go to File > Open Folder > Click OK > choose your file mnist_1.0_softmax.py, and press Ctrl+Shift+B.
The same pop-up window should appear.
The most important thing is to open the file from within the directory provided by the author.
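Alternatively, if you prefer to keep your own working directory, a rough sketch is to put the cloned repo on sys.path before importing (the clone path below is only a placeholder; adjust it to wherever you cloned the repo):
import sys

# Placeholder path to your local clone of tensorflow-mnist-tutorial.
sys.path.append('/path/to/tensorflow-mnist-tutorial')

import tensorflowvisu  # now importable as the plain .py file in that folder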
Try installing matplotlib using pip install --upgrade matplotlib from the Anaconda prompt.

tensorflow control_flow_ops not working well

I have a piece of TensorFlow code which uses control_flow_ops.cond to select which result to use:
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.client import timeline
import time
with tf.device('/cpu:0'):
    a_arr = []
    b = tf.Variable(tf.random_normal([1400, 5600]))
    c_arr = []
    d = tf.Variable(tf.zeros([1, 5600]))
    e_arr = []
    x = tf.placeholder(tf.int32, [250])
    y = tf.placeholder(tf.int32, [250])
    tf.scalar_summary('max/x', tf.reduce_max(x))
    for i in range(0, 250):
        a_arr.append(tf.Variable(tf.random_normal([1, 1400])))
        #c = tf.matmul(a_arr[i], b)
        c = control_flow_ops.cond(x[i] < y[i], lambda: tf.matmul(a_arr[i], b), lambda: d)
        e_arr.append(c)
    summary = tf.merge_all_summaries()
    e_arr.append(summary)
    init = tf.initialize_all_variables()
with tf.Session() as sess:
    train_writer = tf.train.SummaryWriter('tensor_summary/train',
                                          sess.graph)
    sess.run(init)
    xi = [ 1 for i in range(0, 250) ]
    yi = [ 0 for i in range(0, 250) ]
    print(np.sum(xi < yi))
    for i in range(1000):
        time_s = time.time()
        out_arr = sess.run(e_arr, feed_dict={x:xi, y:yi})
        train_writer.add_summary(out_arr[-1], 1)
        time_e = time.time()
        print('duration = %f' %(time_e - time_s))
Here tf.matmul should not be executed, but it actually is. I run it on TensorFlow 0.10.0 on a 32-core CPU, where it uses more than 900% CPU and the execution time is 13 ms; saving the timeline data shows that the MatMul is also executed.
This is a test case for TensorFlow's control_flow_ops.cond, which is also used in bidirectional_rnn.
How can I avoid executing tf.matmul in this case while still making use of control_flow_ops.cond to dynamically select one of the two results?
Are there any settings?
The MatMuls are not really executed, even though the timeline includes them. It is a bit confusing, and we will consider removing them. If you time it both with and without cond, you should be able to see the difference.
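A minimal sketch of such a timing comparison (TF 0.x/1.x-style API, as in the question; the matrix sizes are arbitrary): run the same cond-guarded matmul once with each predicate value and compare the per-run times.
import time
import tensorflow as tf

a = tf.Variable(tf.random_normal([1400, 5600]))
b = tf.Variable(tf.random_normal([5600, 1400]))
pred = tf.placeholder(tf.bool, [])
# Only the taken branch should actually run the MatMul.
out = tf.cond(pred, lambda: tf.matmul(a, b), lambda: tf.zeros([1400, 1400]))

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for flag in (True, False):
        sess.run(out, feed_dict={pred: flag})   # warm-up run
        start = time.time()
        for _ in range(10):
            sess.run(out, feed_dict={pred: flag})
        print('pred=%r: %.1f ms/run' % (flag, (time.time() - start) * 100.0))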

Why is the gradient of tf.sign() not equal to 0?

I expected the gradient for tf.sign() in TensorFlow to be equal to 0 or None. However, when I examined the gradients, I found that they were equal to very small numbers (e.g. 1.86264515e-09). Why is that?
(If you are curious as to why I even want to know this, it is because I want to implement the "straight-through estimator" described here, and before overriding the gradient for tf.sign(), I wanted to check that the default behavior was in fact what I was expecting.)
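For reference, the straight-through override I have in mind is roughly the following sketch, which routes the gradient of the Sign op through Identity via gradient_override_map (the binarize helper name is just mine):
import tensorflow as tf

def binarize(x):
    # Forward pass: sign(x); backward pass: identity gradient (straight-through).
    g = tf.get_default_graph()
    with g.gradient_override_map({"Sign": "Identity"}):
        return tf.sign(x)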
EDIT: Here is some code which reproduces the error. The model is just the linear regression model from the introduction to TensorFlow, except that I use y = sign(W) * x + b instead of y = W * x + b.
import tensorflow as tf
import numpy as np
def gvdebug(g, v):
    g2 = tf.zeros_like(g, dtype=tf.float32)
    v2 = tf.zeros_like(v, dtype=tf.float32)
    g2 = g
    v2 = v
    return g2,v2
# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = tf.sign(W) * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
grads_and_vars = optimizer.compute_gradients(loss)
gv2 = [gvdebug(gv[0], gv[1]) for gv in grads_and_vars]
apply_grads = optimizer.apply_gradients(gv2)
# Before starting, initialize the variables. We will 'run' this first.
init = tf.initialize_all_variables()
# Launch the graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.01)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)
# Fit the line.
for step in range(201):
    sess.run(apply_grads)
    if (step % 20 == 0) or ((step-1) % 20 == 0):
        print("")
        print(sess.run(gv2[0][1])) #the variable
        print(sess.run(gv2[0][0])) #the gradient
        print("")
        print(step, sess.run(W), sess.run(b))
# Learns best fit is W: [0.1], b: [0.3]