How do I suppress outputs from the TensorFlow Object Detection API?

I am using the TensorFlow Object Detection API. I trained a model and extracted a graph, but during inference I get messages that seem like prints of the graph layers and training parameters. Here's a taste:
Optimizing fused batch norm node
name: "SecondStageFeatureExtractor/InceptionV2/Mixed_5c/Branch_3/Conv2d_0b_1x1/BatchNorm/FusedBatchNorm"
op: "FusedBatchNorm"
input: "SecondStageFeatureExtractor/InceptionV2/Mixed_5c/Branch_3/Conv2d_0b_1x1/Conv2D"
input: "SecondStageFeatureExtractor/InceptionV2/Mixed_5c/Branch_3/Conv2d_0b_1x1/BatchNorm/gamma"
input: "SecondStageFeatureExtractor/InceptionV2/Mixed_5c/Branch_3/Conv2d_0b_1x1/BatchNorm/beta"
input: "SecondStageFeatureExtractor/InceptionV2/Mixed_5c/Branch_3/Conv2d_0b_1x1/BatchNorm/moving_mean"
input: "SecondStageFeatureExtractor/InceptionV2/Mixed_5c/Branch_3/Conv2d_0b_1x1/BatchNorm/moving_variance"
device: "/job:localhost/replica:0/task:0/device:GPU:0"
attr { key: "T" value { type: DT_FLOAT } }
attr { key: "data_format" value { s: "NHWC" } }
And many more.
How can I suppress these messages?
I already tried setting os.environ['TF_CPP_MIN_LOG_LEVEL'] and calling tf.logging.set_verbosity(tf.logging.ERROR), but neither suppresses the Object Detection API messages.
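For reference, a minimal sketch of how those two switches are normally applied (the environment variable only takes effect if it is set before tensorflow is imported):

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'    # silence INFO/WARNING/ERROR output from the C++ backend

import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)  # silence Python-side tf.logging INFO/WARNING messages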
My inference code is similar to the one presented here. Here is my code:
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

images = glob.glob(TEST_IMAGE_PATHS)
fig, ax = plt.subplots(1)

with detection_graph.as_default():
    with tf.Session() as sess:
        # Get handles to input and output tensors
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

        # Run inference
        for image_filename in images:
            image = nd.imread(image_filename)
            image = imresize(image, 50)
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            boxes = output_dict['detection_boxes'][0]
            plt.imshow(image)
            shape = image.shape
            for i in range(int(output_dict['num_detections'][0])):
                box_y = boxes[i][0] * shape[0]
                box_x = boxes[i][1] * shape[1]
                box_h = (boxes[i][2] - boxes[i][0]) * shape[0]
                box_w = (boxes[i][3] - boxes[i][1]) * shape[1]
                ax.add_patch(patches.Rectangle((box_x, box_y), box_w, box_h, linewidth=1, edgecolor='r', facecolor='none'))
            fig.show()
            plt.waitforbuttonpress()
            ax.clear()
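A blunt workaround sketch, not from the original post and only useful if the messages come from Python-level prints rather than the C++ runtime: temporarily redirect stdout around the call that emits them, for example the graph import.

import contextlib, io

# Hypothetical workaround (Python 3): swallow Python-level prints while importing the graph.
# Output written directly by the C++ backend is not affected by this.
with contextlib.redirect_stdout(io.StringIO()):
    tf.import_graph_def(od_graph_def, name='')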

Related

Tensorflow inference becomes slower and slower due to eval() function

So I have a frozen tensorflow model which I can use to classify images. When I try to use this model to run inference on images one by one, it just gets slower and slower. I searched and found that the problem may be caused by the eval() function, which keeps adding new nodes to the graph and thus slows down the procedure.
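As a quick illustration of that effect (not taken from my code): every op created inside the loop stays in the default graph, so the op count keeps climbing with each processed image.

import tensorflow as tf

g = tf.get_default_graph()
for i in range(3):
    _ = tf.constant(0.0)              # stands in for the ops image_preprocessing_fn creates per image
    print(len(g.get_operations()))    # prints an ever-growing op count: 1, 2, 3, ...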
Below are the key parts of my code:
with open('/tmp/frozen_resnet_v1_50.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

sess1 = tf.Session()
sess = tf.Session()
for root, dirs, files in os.walk(file_path):
    for f in files:
        # Read image one by one and preprocess
        img = cv2.imread(os.path.join(root, f))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # BGR 2 RGB
        img = image_preprocessing_fn(img, _IMAGE_HEIGHT, _IMAGE_WIDTH)  # This function contains tf functions
        img = img.eval(session=sess1)
        img = np.reshape(img, [-1, _IMAGE_HEIGHT, _IMAGE_WIDTH, _IMAGE_CHANNEL])  # the input shape is 4 dimension
        # Feed image to model
        data = sess.graph.get_tensor_by_name('input:0')
        predict = sess.graph.get_tensor_by_name('resnet_v1_50/predictions/Softmax:0')
        out = sess.run(predict, feed_dict={data: img})
        indices = np.argmax(out, 1)
        print('Current image name: %s, predict result: %s' % (f, indices))
sess1.close()
sess.close()
PS: I use "sess1" to do the preprocessing; I think this may be inappropriate. I hope someone can show me the correct way, thanks in advance.
Nobody answered... Here is my solution, and it works!
with open('/tmp/frozen_resnet_v1_50.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

# Build the preprocessing ops once, outside the loop
x = tf.placeholder(tf.uint8, shape=[None, None, 3])
y = image_preprocessing_fn(x, _IMAGE_HEIGHT, _IMAGE_WIDTH)

sess = tf.Session()
data = sess.graph.get_tensor_by_name('input:0')
predict = sess.graph.get_tensor_by_name('resnet_v1_50/predictions/Softmax:0')
for root, dirs, files in os.walk(file_path):
    for f in files:
        img = cv2.imread(os.path.join(root, f))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # BGR 2 RGB
        img = sess.run(y, feed_dict={x: img})
        img = np.reshape(img, [-1, _IMAGE_HEIGHT, _IMAGE_WIDTH, _IMAGE_CHANNEL])
        out = sess.run(predict, feed_dict={data: img})
        indices = np.argmax(out, 1)
        print('Current image name: %s, predict result: %s' % (f, indices))
sess.close()
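A small extra guard, my suggestion rather than part of the solution above: once the graph is fully built, tf.Graph.finalize() makes any accidental op creation inside the loop fail loudly instead of silently slowing inference down.

import tensorflow as tf

graph = tf.get_default_graph()
graph.finalize()          # freeze the graph structure after all ops are defined

try:
    tf.constant(0.0)      # stands in for an op accidentally created inside the image loop
except RuntimeError as err:
    print('graph is finalized:', err)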

Are there any tools/libraries that can convert tensorflow lstm model to .mlmodel format to run in iOS app

I have a simple tensorflow model that consists of LSTM layers - such as tf.contrib.rnn.LSTMBlockCell or tf.keras.layers.LSTM (I can provide the sample code as well, if needed). I want to run the model in an iOS app. However, I have looked at several websites that say there is presently no way to convert and run a tensorflow model containing LSTM layers in an iOS app.
I have tried these tools/libraries to convert the tensorflow model to .mlmodel format (or .tflite format):
1. Swift for Tensorflow
2. Tensorflow Lite for iOS
3. tfcoreml
However, these tools do not seem to be able to convert a model with LSTM layers to .mlmodel format either. They do allow custom layers to be added, but I don't know how to add an LSTM custom layer.
Am I wrong in saying that there is no support for running a tensorflow LSTM model in iOS apps? If yes, then please guide me on how to include the model in an iOS app. Is there any other tool/library that can be used to convert it to .mlmodel format? If no, are there any plans to include tensorflow support for iOS in the future?
Model
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib.rnn import *

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Summary parameters
logs_path = "logs/"

# Training Parameters
learning_rate = 0.001
training_steps = 1000
batch_size = 128
display_step = 200

# Network Parameters
num_input = 28    # MNIST data input (img shape: 28*28)
timesteps = 28    # timesteps
num_hidden = 128  # hidden layer num of features
num_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}

def RNN(x, weights, biases):
    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)
    # Define a lstm cell with tensorflow
    lstm_cell = rnn.LSTMBlockCell(num_hidden, forget_bias=1.0)
    #lstm_cell = tf.keras.layers.LSTMCell(num_hidden, unit_forget_bias=True)
    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

logits = RNN(X, weights, biases)

with tf.name_scope('Model'):
    prediction = tf.nn.softmax(logits, name="prediction_layer")
with tf.name_scope('Loss'):
    # Define loss and optimizer
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name="loss_layer"), name="reduce_mean_loss")
with tf.name_scope('SGD'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate, name="Gradient_Descent")
    train_op = optimizer.minimize(loss_op, name="minimize_layer")
with tf.name_scope('Accuracy'):
    # Evaluate model (with test logits, for dropout to be disabled)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1), name="correct_pred_layer")
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="reduce_mean_acc_layer")

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Create a summary to monitor cost tensor
tf.summary.scalar("loss", loss_op)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

saver = tf.train.Saver()
save_path = ""
model_save = "model.ckpt"

# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    # op to write logs to Tensorboard
    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    for step in range(1, training_steps + 1):
        total_batch = int(mnist.train.num_examples / batch_size)
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc, summary = sess.run([loss_op, accuracy, merged_summary_op],
                                          feed_dict={X: batch_x, Y: batch_y})
            # Write logs at every iteration
            summary_writer.add_summary(summary, step * total_batch)
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
    saver.save(sess, model_save)
    tf.train.write_graph(sess.graph_def, save_path, 'save_graph.pbtxt')
    #print(sess.graph.get_operations())
    print("Optimization Finished!")
    print("Run the command line:\n"
          "--> tensorboard --logdir=logs/ "
          "\nThen open http://0.0.0.0:6006/ into your web browser")
    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
Code to generate the frozen model
import tensorflow as tf
import numpy as np
from tensorflow.python.tools import freeze_graph

save_path = ""
model_name = "test_model_tf_keras_layers_lstm"
input_graph_path = save_path + "save_graph.pbtxt"
checkpoint_path = save_path + "model.ckpt"
input_saver_def_path = ""
input_binary = False
output_node_names = "Model/prediction_layer"  # output node's name. Should match to that mentioned in the code
restore_op_name = 'save/restore_all'
filename_tensor_name = 'save/const:0'
output_frozen_graph_name = save_path + 'frozen_model' + '.pb'  # name of .pb file that one would like to give
clear_devices = True

freeze_graph.freeze_graph(input_graph_path, input_saver_def_path, input_binary,
                          checkpoint_path, output_node_names, restore_op_name,
                          filename_tensor_name, output_frozen_graph_name,
                          clear_devices, "")
print("Model Freezed")
Conversion Code to generate .mlmodel format file
import tfcoreml

coreml_model = tfcoreml.convert(tf_model_path='frozen_model_test_model_tf_keras_layers_lstm.pb',
                                mlmodel_path='test_model.mlmodel',
                                output_feature_names=['Model/prediction_layer:0'],
                                add_custom_layers=True)
coreml_model.save("test_model.mlmodel")
Error message shown with lstm_cell = rnn.BasicLSTMCell(num_hidden, name="lstm_cell"):
ValueError: Split op case not handled. Input shape = [1, 512], output shape = [1, 128]
Error message shown with lstm_cell = rnn.LSTMBlockCell(num_hidden, name="lstm_cell"):
InvalidArgumentError (see above for traceback): No OpKernel was registered to support Op 'LSTMBlockCell' used by node rnn/lstm_cell/LSTMBlockCell (defined at /anaconda2/lib/python2.7/site-packages/tfcoreml/_tf_coreml_converter.py:153) with these attrs: [forget_bias=1, use_peephole=false, cell_clip=-1, T=DT_FLOAT]
Registered devices: [CPU]
Registered kernels:
  <no registered kernels>
[[node rnn/lstm_cell/LSTMBlockCell (defined at /anaconda2/lib/python2.7/site-packages/tfcoreml/_tf_coreml_converter.py:153) ]]
I expect that the frozen tensorflow model can be converted to .mlmodel format.

got shape [4575, 32, 32, 3], but wanted [4575] Tensorflow

I'm a newbie to Tensorflow. I have created a CNN in Tensorflow following this topic: A Guide to TF Layers: Building a Convolutional Neural Network
I want to use the CNN to train on a traffic sign dataset. The dataset I use is BelgiumTS. It includes two parts: one part stores images for training, the other stores images for testing. All of the images are in .ppm format.
I define a method to load the dataset:
def load_data(data_dir):
    """Load Data and return two numpy arrays"""
    directories = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
    list_labels = []
    list_images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            list_images.append(skimage.data.imread(f))
            list_labels.append(int(d))
    # Resize images to 32x32 pixels
    list_images32 = [skimage.transform.resize(image, (32, 32)) for image in list_images]
    # Got Error "Value passed to parameter 'input' has DataType float64 not in list of allowed values: float16, float32" if I don't add this line
    list_images32 = tf.cast(list_images32, tf.float32)
    images = np.array(list_images32)
    labels = np.asarray(list_labels, dtype=int32)
    return images, labels
And this is the CNN definition:
def cnn_model_fn(features, labels, mode):
    # Input layer
    input_layer = tf.reshape(features["x"], [-1, 32, 32, 1])
    # Convolutional layer 1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional layer 2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 2
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense layer
    pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    # Dropout
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss Value
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
I run my app in main:
def main(unused_argv):
    # Load training and eval data
    train_data_dir = "W:/Projects/AutoDrive/Training"
    test_data_dir = "W:/Projects/AutoDrive/Testing"
    images, labels = load_data(train_data_dir)
    test_images, test_labels = load_data(test_data_dir)
    # Create the Estimator
    autoDrive_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/autoDrive_convnet_model")
    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)
    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": images},
        y=labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    autoDrive_classifier.train(
        input_fn=train_input_fn,
        steps=10000,
        hooks=[logging_hook])
    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_images},
        y=test_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = autoDrive_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
But when I run it, I get this error: ValueError: Argument must be a dense tensor ... got shape [4575, 32, 32, 3], but wanted [4575]. Did I miss something?
Finally, this is the full code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
import os
import skimage.data
import skimage.transform
import matplotlib
import matplotlib.pyplot as plt

tf.logging.set_verbosity(tf.logging.INFO)

def load_data(data_dir):
    """Load Data and return two lists"""
    directories = [d for d in os.listdir(data_dir) if
                   os.path.isdir(os.path.join(data_dir, d))]
    list_labels = []
    list_images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            list_images.append(skimage.data.imread(f))
            list_labels.append(int(d))
    list_images32 = [skimage.transform.resize(image, (32, 32)) for image in list_images]
    list_images32 = tf.cast(list_images32, tf.float32)
    images = np.array(list_images32)
    labels = np.asarray(list_labels, dtype=int32)
    return images, labels

def cnn_model_fn(features, labels, mode):
    # Input layer
    input_layer = tf.reshape(features["x"], [-1, 32, 32, 1])
    # Convolutional layer 1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional layer 2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 2
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense layer
    pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    # Dropout
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss Value
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

def main(unused_argv):
    # Load training and eval data
    train_data_dir = "W:/Projects/TSRecognition/Training"
    test_data_dir = "W:/Projects/TSRecognition/Testing"
    images, labels = load_data(train_data_dir)
    test_images, test_labels = load_data(test_data_dir)
    # Create the Estimator
    TSRecognition_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/TSRecognition_convnet_model")
    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)
    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": images},
        y=labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    TSRecognition_classifier.train(
        input_fn=train_input_fn,
        steps=10000,
        hooks=[logging_hook])
    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_images},
        y=test_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = TSRecognition_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)

if __name__ == "__main__":
    tf.app.run()
Short answer for your code:
Get rid of the np.array and np.asarray calls in your load_data function. In particular, change:
list_images32 = [skimage.transform.resize(image,(32,32)) for image in list_images]
...to...
list_images32 = [skimage.transform.resize(image,(32,32)).astype(np.float32).tolist() for image in list_images]
...and return list_images32 AS IS from your load_data function. Don't "wrap it" with the np.asarray() call. The tolist() part of my suggestion is what is important. With the astype() call I'm just suggesting doing in numpy something you're doing in TensorFlow.
Simply getting rid of the np.asarray you have on list_labels should suffice for your labels.
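Putting that advice together, a sketch of what the adjusted load_data could look like (hypothetical, reusing the names from the question):

import os
import numpy as np
import skimage.data
import skimage.transform

def load_data(data_dir):
    """Load data and return a plain list of 32x32 images and a plain list of labels."""
    directories = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
    list_labels = []
    list_images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            list_images.append(skimage.data.imread(f))
            list_labels.append(int(d))
    # Nested plain Python lists of float32 values; no np.array/np.asarray wrapping
    list_images32 = [skimage.transform.resize(image, (32, 32)).astype(np.float32).tolist()
                     for image in list_images]
    return list_images32, list_labels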
The full answer for those that want to understand what's going on...
The "got shape...but wanted" exception is thrown from exactly one place in TensorFlow (tensor_util.py) and the reason is this function:
def _GetDenseDimensions(list_of_lists):
    """Returns the inferred dense dimensions of a list of lists."""
    if not isinstance(list_of_lists, (list, tuple)):
        return []
    elif not list_of_lists:
        return [0]
    else:
        return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
It is trying to traverse what it assumes are nested plain Python lists or plain Python tuples; it doesn't know what to do with the Numpy array type it finds in your data structure because of the np.array/np.asarray calls.
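To make the failure mode concrete, here is a small illustration (hypothetical values) of what the function above returns for plain nested lists versus a list of numpy arrays:

import numpy as np

plain = [[[0.0] * 3 for _ in range(32)] for _ in range(32)]   # nested plain lists
print(_GetDenseDimensions([plain] * 4))                        # -> [4, 32, 32, 3]

arrays = [np.zeros((32, 32, 3)) for _ in range(4575)]          # list of numpy arrays
print(_GetDenseDimensions(arrays))                             # -> [4575]; recursion stops at the first ndarray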

tensorboard embeddings hangs with "Computing PCA"

I'm trying to display my embeddings in tensorboard. When I open the embeddings tab of tensorboard I get "Computing PCA..." and tensorboard hangs indefinitely.
Before that it does load my tensor of shape 200x128. It does find the metadata file too.
I tried that on TF versions 0.12 and 1.1 with the same result.
features = np.zeros(shape=(num_batches*batch_size, 128), dtype=float)
embedding_var = tf.Variable(features, name='feature_embedding')
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = 'feature_embedding'
metadata_path = os.path.join(self.log_dir, 'metadata.tsv')
embedding.metadata_path = metadata_path

with tf.Session(config=self.config) as sess:
    tf.global_variables_initializer().run()
    restorer = tf.train.Saver()
    restorer.restore(sess, self.pretrained_model_path)

    with open(metadata_path, 'w') as f:
        for step in range(num_batches):
            batch_images, batch_labels = data.next()
            for label in batch_labels:
                f.write('%s\n' % label)
            feed_dict = {model.images: batch_images}
            features[step*batch_size : (step+1)*batch_size, :] = \
                sess.run(model.features, feed_dict)

    sess.run(embedding_var.initializer)
    projector.visualize_embeddings(tf.summary.FileWriter(self.log_dir), config)
I don't know what was wrong in the code above, but I rewrote it in a different way (below), and it works. The difference is when and how the embedding_var is initialized.
I also made a gist with copy-pasteable code.
# a numpy array for embeddings and a list for labels
features = np.zeros(shape=(num_batches*self.batch_size, 128), dtype=float)
labels = []

# compute embeddings batch by batch
with tf.Session(config=self.config) as sess:
    tf.global_variables_initializer().run()
    restorer = tf.train.Saver()
    restorer.restore(sess, self.pretrained_model)
    for step in range(num_batches):
        batch_images, batch_labels = data.next()
        labels += batch_labels
        feed_dict = {model.images: batch_images}
        features[step*self.batch_size : (step+1)*self.batch_size, :] = \
            sess.run(model.features, feed_dict)

# write labels
metadata_path = os.path.join(self.log_dir, 'metadata.tsv')
with open(metadata_path, 'w') as f:
    for label in labels:
        f.write('%s\n' % label)

# write embeddings
with tf.Session(config=self.config) as sess:
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = 'feature_embedding'
    embedding.metadata_path = metadata_path
    embedding_var = tf.Variable(features, name='feature_embedding')
    sess.run(embedding_var.initializer)
    projector.visualize_embeddings(tf.summary.FileWriter(self.log_dir), config)
    saver = tf.train.Saver({"feature_embedding": embedding_var})
    saver.save(sess, os.path.join(self.log_dir, 'model_features'))
It's a bug. It's fixed in tensorflow 1.13
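A quick way to check whether the installed version already has the fix (assuming a TF 1.x install):

import tensorflow as tf
print(tf.__version__)   # the PCA hang described above was reportedly fixed in 1.13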

Tensorflow inference with single image with CIFAR-10 example

I'm trying to run inference on a single image using the tensorflow cifar10 example:
https://www.tensorflow.org/versions/r0.8/tutorials/deep_cnn/index.html#convolutional-neural-networks
def restore_vars(saver, sess):
    """ Restore saved net, global score and step, and epsilons OR
    create checkpoint directory for later storage. """
    #sess.run(tf.initialize_all_variables())
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
        return True
    else:
        print('No checkpoint file found')
        return False

def eval_single_img():
    input_img = tf.image.decode_jpeg(tf.read_file("test.jpg"), channels=3)
    input_img = tf.reshape(input_img, [3, 32, 32])
    input_img = tf.transpose(input_img, [1, 2, 0])
    reshaped_image = tf.cast(input_img, tf.float32)
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, 24, 24)
    float_image = tf.image.per_image_whitening(resized_image)
    image = tf.expand_dims(float_image, 0)  # create a fake batch of images (batch_size = 1)
    logits = cifar10.inference(image)
    _, top_k_pred = tf.nn.top_k(logits, k=5)
    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
        restored = restore_vars(saver, sess)
        top_indices = sess.run([top_k_pred])
        print("Predicted ", top_indices[0], " for your input image.")
ERROR MESSAGE:
tensorflow.python.framework.errors.InvalidArgumentError: Assign requires shapes of both tensors to match. lhs shape= [18,384] rhs shape= [2304,384]
[[Node: save/Assign_5 = Assign[T=DT_FLOAT, _class=["loc:#local3/weights"], use_locking=true, validate_shape=true, _device="/job:localhost/replica:0/task:0/cpu:0"](local3/weights, save/restore_slice_5)]]
Caused by op u'save/Assign_5', defined at:

What might be causing this?