TensorFlow: restore from checkpoint to continue training

In this case, I want to continue training my model from a checkpoint. I used the CIFAR-10 example and made a small change in cifar10_train.py, shown below. It is almost the same, except that I want to restore from a checkpoint:
I replaced the cifar10 module with md.
"""
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import md
"""
"""
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/root/test/INT/tbc',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 60000, # 55000 steps per epoch
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '/root/test/INT/',
"""If specified, restore this pretrained model """
"""before beginning any training.""")
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and 1-hot labels."""
return 100.0 - (
100.0 *
numpy.sum(numpy.argmax(predictions, 0) == numpy.argmax(labels, 0)) /
predictions.shape[0])
def train():
"""Train MD65500 for a number of steps."""
with tf.Graph().as_default():
# global_step = tf.Variable(0, trainable=False)
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Get images and labels for CIFAR-10.
images, labels = md.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = md.inference(images)
# Calculate loss.
loss = md.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = md.train(loss, global_step)
# Predictions for the minibatch. there is no validation set or test set.
# train_prediction = tf.nn.softmax(logits)
train_prediction = logits
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
# sess = tf.Session(config=tf.ConfigProto(
# log_device_placement=FLAGS.log_device_placement))
# sess.run(init)
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
# sess.run(init)
if FLAGS.pretrained_model_checkpoint_path:
assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
# variables_to_restore = tf.get_collection(
# slim.variables.VARIABLES_TO_RESTORE)
variable_averages = tf.train.ExponentialMovingAverage(
md.MOVING_AVERAGE_DECAY)
variables_to_restore = {}
for v in tf.all_variables():
if v in tf.trainable_variables():
restore_name = variable_averages.average_name(v)
else:
restore_name = v.op.name
variables_to_restore[restore_name] = v
ckpt = tf.train.get_checkpoint_state(FLAGS.pretrained_model_checkpoint_path)
if ckpt and ckpt.model_checkpoint_path:
# global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, ckpt.model_checkpoint_path)
print('%s: Pre-trained model restored from %s' %
(datetime.now(), ckpt.model_checkpoint_path))
# print("variables_to_restore")
# print(variables_to_restore)
else:
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
graph_def=sess.graph) #####graph_def=sess.graph_def)
# tf.add_to_collection('train_op', train_op)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value, predictions = sess.run([train_op, loss, train_prediction])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 100 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
# print('Minibatch error: %.5f%%' % error_rate(predictions, labels))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
# md.maybe_download()
# if gfile.Exists(FLAGS.train_dir):
# gfile.DeleteRecursively(FLAGS.train_dir)
# gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
When I run the code, I get errors like this:
[root@bogon md try]# pythonnew mdtbc_3.py
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcublas.so locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcudnn.so locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcufft.so locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcuda.so.1 locally
I tensorflow/stream_executor/dso_loader.cc:105] successfully opened CUDA library libcurand.so locally
Filling queue with 4000 CIFAR images before starting to train. This will take a few minutes.
I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:900] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
I tensorflow/core/common_runtime/gpu/gpu_init.cc:102] Found device 0 with properties:
name: GeForce GTX 980 Ti
major: 5 minor: 2 memoryClockRate (GHz) 1.228
pciBusID 0000:01:00.0
Total memory: 6.00GiB
Free memory: 5.78GiB
I tensorflow/core/common_runtime/gpu/gpu_init.cc:126] DMA: 0
I tensorflow/core/common_runtime/gpu/gpu_init.cc:136] 0: Y
I tensorflow/core/common_runtime/gpu/gpu_device.cc:755] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GTX 980 Ti, pci bus id: 0000:01:00.0)
2016-08-30 17:12:48.883303: Pre-trained model restored from /root/test/INT/model.ckpt-59999
WARNING:tensorflow:When passing a `Graph` object, please use the `graph` named argument instead of `graph_def`.
Traceback (most recent call last):
File "mdtbc_3.py", line 195, in <module>
tf.app.run()
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 30, in run
sys.exit(main(sys.argv))
File "mdtbc_3.py", line 191, in main
train()
File "mdtbc_3.py", line 160, in train
_, loss_value, predictions = sess.run([train_op, loss, train_prediction])
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 340, in run
run_metadata_ptr)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 564, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 637, in _do_run
target_list, options, run_metadata)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 659, in _do_call
e.code)
tensorflow.python.framework.errors.FailedPreconditionError: Attempting to use uninitialized value conv2/weights
[[Node: conv2/weights/read = Identity[T=DT_FLOAT, _class=["loc:@conv2/weights"], _device="/job:localhost/replica:0/task:0/cpu:0"](conv2/weights)]]
Caused by op u'conv2/weights/read', defined at:
File "mdtbc_3.py", line 195, in <module>
tf.app.run()
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 30, in run
sys.exit(main(sys.argv))
File "mdtbc_3.py", line 191, in main
train()
File "mdtbc_3.py", line 77, in train
logits = md.inference(images)
File "/root/test/md try/md.py", line 272, in inference
stddev=0.1, wd=0.0)
File "/root/test/md try/md.py", line 114, in _variable_with_weight_decay
tf.truncated_normal_initializer(stddev=stddev))
File "/root/test/md try/md.py", line 93, in _variable_on_cpu
var = tf.get_variable(name, shape, initializer=initializer)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 339, in get_variable
collections=collections)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 262, in get_variable
collections=collections, caching_device=caching_device)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 158, in get_variable
dtype=variable_dtype)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 209, in __init__
dtype=dtype)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 318, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 609, in identity
return _op_def_lib.apply_op("Identity", input=input, name=name)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/ops/op_def_library.py", line 655, in apply_op
op_def=op_def)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2154, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/pythonnew/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1154, in __init__
self._traceback = _extract_stack()
When I uncomment line 107, sess.run(init), it runs perfectly, but with a freshly initialized model, just a new one from scratch. I want to restore the variables from the checkpoint and continue my training.

Without having the rest of your code handy, I'd say that the following part is problematic:
for v in tf.all_variables():
  if v in tf.trainable_variables():
    restore_name = variable_averages.average_name(v)
  else:
    restore_name = v.op.name
  variables_to_restore[restore_name] = v
You specify a set of variables to restore here, but you remap some of the names (the trainable variables are keyed by their moving-average names rather than their own v.op.name). That changes the name under which the variable that throws the error is looked up (again, without the rest of the code, I cannot really say), such that one or more variables are not restored properly. Two approaches (which are not very sophisticated) will help you here:
If you do not store all variables, run the initialization first, and then restore the variables you have actually stored. This makes sure that tensors you do not really care about get initialized nonetheless; see the sketch after this list.
TF is very efficient when it comes to storing nets. If in doubt, store all variables ...
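A minimal sketch of the first approach, reusing sess, init, and the checkpoint flag from the question; it assumes the checkpoint was written by a saver over tf.all_variables(), as in the training loop above:
# Initialize everything first so that no variable is left uninitialized,
# then overwrite whatever the checkpoint actually contains.
sess.run(init)

ckpt = tf.train.get_checkpoint_state(FLAGS.pretrained_model_checkpoint_path)
if ckpt and ckpt.model_checkpoint_path:
  # Approach 2 from above: restore every variable under its own name,
  # avoiding mistakes in a hand-built name mapping.
  restorer = tf.train.Saver(tf.all_variables())
  restorer.restore(sess, ckpt.model_checkpoint_path)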

Related

How to use a model trained on GPU using CudnnLSTM on CPU?

TensorFlow version 1.6.0 on Ubuntu 16.04.
The network uses CudnnLSTM: https://www.tensorflow.org/api_docs/python/tf/contrib/cudnn_rnn/CudnnLSTM
Model export and prediction work on the GPU, but exporting and running inference on the CPU gives the error below.
File "/home/deepak/.local/lib/python2.7/site-packages/tensorflow/contrib/cudnn_rnn/python/layers/cudnn_rnn.py", line 501, in _create_saveable
name="%s_saveable" % self.trainable_variables[0].name.split(":")[0])
File "/home/deepak/.local/lib/python2.7/site-packages/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py", line 262, in __init__
weights, biases = self._OpaqueParamsToCanonical()
File "/home/deepak/.local/lib/python2.7/site-packages/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py", line 315, in _OpaqueParamsToCanonical
direction=self._direction)
File "/home/deepak/.local/lib/python2.7/site-packages/tensorflow/contrib/cudnn_rnn/ops/gen_cudnn_rnn_ops.py", line 769, in cudnn_rnn_params_to_canonical
name=name)
File "/home/deepak/.local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/home/deepak/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3290, in create_op
op_def=op_def)
File "/home/deepak/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1654, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): No OpKernel was registered to support Op 'CudnnRNNParamsToCanonical' with these attrs. Registered devices: [CPU], Registered kernels:
<no registered kernels>
[[Node: CudnnRNNParamsToCanonical = CudnnRNNParamsToCanonical[T=DT_FLOAT, direction="bidirectional", dropout=0, input_mode="linear_input", num_params=16, rnn_mode="lstm", seed=0, seed2=0, _device="/device:GPU:0"](CudnnRNNParamsToCanonical/num_layers, CudnnRNNParamsToCanonical/num_units, CudnnRNNParamsToCanonical/input_size, cudnn_lstm/opaque_kernel/read)]]
The export code is as follows:
with tf.Graph().as_default() as graph:
    inputs, outputs = create_graph()
    # Create a saver using variables from the above newly created graph
    saver = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        # Restore the model from last checkpoints
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        saver.restore(sess, ckpt.model_checkpoint_path)
        # (re-)create export directory
        export_path = os.path.join(
            tf.compat.as_bytes(FLAGS.export_dir),
            tf.compat.as_bytes(str(FLAGS.export_version)))
        if os.path.exists(export_path):
            shutil.rmtree(export_path)
        # create model builder
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)
        input_node = graph.get_tensor_by_name('input_node:0')
        input_lengths = graph.get_tensor_by_name('input_lengths:0')
        outputs = graph.get_tensor_by_name('output_node:0')
        # create tensors info
        predict_tensor_inputs_info = tf.saved_model.utils.build_tensor_info(input_node)
        predict_tensor_inputs_length_info = tf.saved_model.utils.build_tensor_info(input_lengths)
        predict_tensor_scores_info = tf.saved_model.utils.build_tensor_info(outputs)
        # build prediction signature
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'input': predict_tensor_inputs_info,
                        'input_len': predict_tensor_inputs_length_info},
                outputs={'output': predict_tensor_scores_info},
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
            )
        )
        # save the model
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'infer': prediction_signature
            })
        builder.save()

Tensorflow does NOT utilize the memory from two GPUs in Windows 10

Tensorflow Version 1.3.0
OS: Windows 10
GPUs: Nvidia Quadro M4000 * 2 with 8G GPU memory for each
GPU modes: one for WDDM, one for TCC
I tested the official code at https://github.com/tensorflow/models/blob/master/official/resnet/imagenet_main.py
I just added the GPU constraints in the main function:
def main(unused_argv):
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
    # For this line: setting visible_device_list to "0" only and to "0, 1"
    # supports only the same batch_size
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list='0, 1'))
    resnet_classifier = tf.estimator.Estimator(
        model_fn=imagenet_model_fn, model_dir=FLAGS.model_dir,
        config=tf.contrib.learn.RunConfig(session_config=config))
    for cycle in range(FLAGS.train_steps // FLAGS.steps_per_eval):
        tensors_to_log = {
            'learning_rate': 'learning_rate',
            'cross_entropy': 'cross_entropy',
            'train_accuracy': 'train_accuracy'
        }
        logging_hook = tf.train.LoggingTensorHook(
            tensors=tensors_to_log, every_n_iter=100)
        print('Starting a training cycle.')
        resnet_classifier.train(
            input_fn=lambda: input_fn(tf.estimator.ModeKeys.TRAIN),
            steps=FLAGS.first_cycle_steps or FLAGS.steps_per_eval,
            hooks=[logging_hook])
        FLAGS.first_cycle_steps = None
        print('Starting to evaluate.')
        eval_results = resnet_classifier.evaluate(
            input_fn=lambda: input_fn(tf.estimator.ModeKeys.EVAL))
        print(eval_results)
In the training process, if I set the visible device list to "0, 1" or to "0" only, both runs succeed with batch_size=48, but BOTH fail with batch_size=49! This indicates that the second GPU's memory is not being utilized, since the batch size cannot be any larger with two GPUs than with one. I used nvidia-smi to confirm that one or two GPUs, as configured, were active in the above experiments.
My questions are:
Is there any way I can use a bigger batch_size when using two GPUs?
If the answer to Q1 is no on Windows, is there any way to do it on Linux? I am not familiar with Linux. On Linux, can I set all GPUs to TCC mode? Will the batch size be bigger when both GPUs are in TCC mode?
Thank you.
-------------Update------------
I have tried distributing the data batch across the two GPUs, and now there is a NaN loss error. What could be causing this? It ran well before (using one GPU only), but now even if I set _DEVICE_LIST to a single GPU, it still produces the NaN loss error.
My modified code is:
def imagenet_model_fn(features, labels, mode):
    tf.summary.image('images', features, max_outputs=6)
    with tf.device('/cpu:0'):
        split_batch = tf.split(features, len(_DEVICE_LIST))
        split_labels = tf.split(labels, len(_DEVICE_LIST))
    all_predictions = {
        'classes': [],
        'probabilities': []
    }
    all_cross_entropy = []
    all_reg_loss = []
    with tf.variable_scope(tf.get_variable_scope()):
        for dev_idx, (device, device_features, device_labels) in enumerate(zip(
                _DEVICE_LIST, split_batch, split_labels)):
            with tf.device(device):
                with tf.name_scope('device_%d' % dev_idx):
                    logits = network(inputs=device_features,
                                     is_training=(mode == tf.estimator.ModeKeys.TRAIN))
                    tf.get_variable_scope().reuse_variables()
                    all_predictions['classes'].append(tf.argmax(logits, axis=1))
                    all_predictions['probabilities'].append(tf.nn.softmax(logits))
                    if mode == tf.estimator.ModeKeys.TRAIN:
                        # Calculate loss, which includes softmax cross entropy
                        # and L2 regularization.
                        cross_entropy = tf.losses.softmax_cross_entropy(
                            logits=logits, onehot_labels=device_labels)
                        reg_loss = FLAGS.weight_decay * tf.add_n(
                            [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
                        all_cross_entropy.append(cross_entropy)
                        all_reg_loss.append(reg_loss)
    all_predictions['classes'] = tf.reshape(all_predictions['classes'], [-1])
    all_predictions['probabilities'] = tf.reshape(
        all_predictions['probabilities'], [-1])
    total_cross_entropy = tf.add_n(all_cross_entropy)
    total_reg_loss = tf.add_n(all_reg_loss)
    total_loss = total_cross_entropy + total_reg_loss
    tf.identity(total_cross_entropy, name='cross_entropy')
    tf.summary.scalar('cross_entropy', total_cross_entropy)
    tf.summary.scalar('reg_loss', total_reg_loss)
    tf.summary.scalar('total_loss', total_loss)
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        boundaries = [
            int(batches_per_epoch * epoch) for epoch in [30, 60, 120, 150]]
        values = [
            _INITIAL_LEARNING_RATE * decay for decay in [1, 0.1, 0.01, 1e-3, 1e-4]]
        learning_rate = tf.train.piecewise_constant(
            tf.cast(global_step, tf.int32), boundaries, values)
        tf.identity(learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', learning_rate)
        optimizer = tf.train.MomentumOptimizer(
            learning_rate=learning_rate,
            momentum=_MOMENTUM)
        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(total_loss, global_step)
    else:
        train_op = None
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=all_predictions,
        loss=total_loss,
        train_op=train_op)
The error message is:
INFO:tensorflow:Saving checkpoints for 1 into F:\projects\DeepLearning\TensorFlow\Models\ImageNet\resnet_101_imagenet_augmented\temp\model.ckpt.
INFO:tensorflow:learning_rate = 0.003125, cross_entropy = 14.394
INFO:tensorflow:loss = 30.0782, step = 1
ERROR:tensorflow:Model diverged with loss = NaN.
Traceback (most recent call last):
File "imagenet_main.py", line 321, in <module>
tf.app.run()
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\platform\app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "imagenet_main.py", line 310, in main
hooks=[logging_hook])
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\estimator.py", line 241, in train
loss = self._train_model(input_fn=input_fn, hooks=hooks)
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\estimator.py", line 686, in _train_model
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\training\monitored_session.py", line 518, in run
run_metadata=run_metadata)
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\training\monitored_session.py", line 862, in run
run_metadata=run_metadata)
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\training\monitored_session.py", line 818, in run
return self._sess.run(*args, **kwargs)
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\training\monitored_session.py", line 980, in run
run_metadata=run_metadata))
File "C:\Users\User\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\training\basic_session_run_hooks.py", line 551, in after_run
raise NanLossDuringTrainingError
tensorflow.python.training.basic_session_run_hooks.NanLossDuringTrainingError: NaN loss during training.

Memory error when initializing Xception using Keras

I am having difficulty implementing the pre-trained Xception model for binary classification over a new set of classes. The model is successfully returned from the following function:
# adapted from:
# https://github.com/fchollet/keras/issues/4465
from keras.applications.xception import Xception
from keras.layers import Input, Flatten, Dense
from keras.models import Model

def get_xception(in_shape, trn_conv):
    # Get back the convolutional part of Xception trained on ImageNet
    model = Xception(weights='imagenet', include_top=False)
    # Here the input images have been resized to 299x299x3, so this is the
    # same as Xception's native input
    input = Input(in_shape, name='image_input')
    # Use the generated model
    output = model(input)
    # Only train the top fully connected layers (keep pre-trained feature extractors)
    for layer in model.layers:
        layer.trainable = False
    # Add the fully-connected layers
    x = Flatten(name='flatten')(output)
    x = Dense(2048, activation='relu', name='fc1')(x)
    x = Dense(2048, activation='relu', name='fc2')(x)
    x = Dense(2, activation='softmax', name='predictions')(x)
    # Create your own model
    my_model = Model(input=input, output=x)
    my_model.compile(loss='binary_crossentropy', optimizer='SGD')
    return my_model
This returns fine; however, when I run this code:
model=get_xception(shp,trn_feat)
in_data=HDF5Matrix(str_trn,'/inputs')
labels=HDF5Matrix(str_trn,'/labels')
model.fit(in_data,labels,shuffle="batch")
I get the following error:
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/keras/engine/training.py", line 1576, in fit
self._make_train_function()
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/keras/engine/training.py", line 960, in _make_train_function
loss=self.total_loss)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 87, in wrapper
return func(*args, **kwargs)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/keras/optimizers.py", line 169, in get_updates
v = self.momentum * m - lr * g # velocity
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 705, in _run_op
return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 865, in binary_op_wrapper
return func(x, y, name=name)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 1088, in _mul_dispatch
return gen_math_ops._mul(x, y, name=name)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/tensorflow/python/ops/gen_math_ops.py", line 1449, in _mul
result = _op_def_lib.apply_op("Mul", x=x, y=y, name=name)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2630, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/tsmith/.virtualenvs/keras/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1204, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
ResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[204800,2048]
[[Node: training/SGD/mul = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](SGD/momentum/read, training/SGD/Variable/read)]]
I have been tracing the function calls for hours now and still can't figure out what is happening. The system should be far above and beyond the requirements. System specs:
Ubuntu Version: 14.04.5 LTS
Tensorflow Version: 1.3.0
Keras Version: 2.0.7
28x dual-core Intel Xeon processors (1.2 GHz)
4x NVidia GeForce 1080 (8GB memory each)
Any clues as to what is going wrong here?
Per Yu-Yang, the simplest solution was to reduce the batch size; everything ran fine after that!
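For instance, a minimal sketch of that fix; 16 is an illustrative value, and the largest batch that fits depends on your GPU memory:
# Keras' fit defaults to batch_size=32; a smaller explicit value shrinks
# the per-step tensors the optimizer has to allocate.
model.fit(in_data, labels, shuffle="batch", batch_size=16)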

Tensorflow object_detection: unable to find input and output tensors

I've successfully trained and saved a Faster R-CNN model for TensorFlow using their object detection API. I'm now trying to run some inference with it, taking bits of code from this tutorial.
However, after I successfully restore the metagraph and the checkpoint, the system can't find the input and output nodes; I get the following error:
KeyError: "The name 'image_tensor:0' refers to a Tensor which does not
exist. The operation, 'image_tensor', does not exist in the graph."
The checkpoint and metagraph were created by the train.py script, on my own data, following the instructions given here.
This is my code:
OUTPUT_DIR = "my_path/models/SSD_v1/train"
CKPT_DIR = OUTPUT_DIR
LATEST_CKPT_FILENAME = "checkpoint"
LAST_CKPT_FILE = os.path.join(CKPT_DIR, LATEST_CKPT_FILENAME)
MODEL_FILENAME_PATH = os.path.join(OUTPUT_DIR, "model.ckpt.meta")

def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)

def test_model(images_list, path_to_ckpt=None, meta_graph=None):
    if path_to_ckpt is None:
        path_to_ckpt = tf.train.latest_checkpoint(CKPT_DIR, LATEST_CKPT_FILENAME)
    if meta_graph is None:
        meta_graph = MODEL_FILENAME_PATH
    print("test_model launched")
    tf.reset_default_graph()
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Restore graph
            saver = tf.train.import_meta_graph(meta_graph, clear_devices=True)
            print('metagraph restored')
            saver.restore(sess, path_to_ckpt)
            print('graph restored')
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')  # This is where the error happens
            # Each box represents a part of the image where a particular object was detected.
            detected_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            detected_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detected_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            print("Output tensors: ")
            print(detected_boxes)
            print(detected_scores)
            print(detected_classes)
            print('')
            for i, image in enumerate(images_list):
                # Use fresh names for the results so the tensors above
                # are not clobbered on the first iteration.
                boxes, scores, classes, num_detect = sess.run(
                    [detected_boxes, detected_scores, detected_classes, num_detections],
                    feed_dict={image_tensor: image})
                print(i, num_detect, boxes, scores, classes)

def main():
    directory_path = "../data/samples/"
    image_files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_list = [np.expand_dims(load_image_into_numpy_array(Image.open(os.path.join(directory_path, f))), axis=0)
                  for f in image_files]
    test_model(images_list=image_list)

if __name__ == "__main__":
    main()
Full error stacktrace:
Traceback (most recent call last):
  File "/home/guillaumedelaboulaye/PR8210PANO/faster-rcnn/pano_faster_rcnn/src/run_faster_rcnn_inference.py", line 99, in <module>
    main()
  File "/home/guillaumedelaboulaye/PR8210PANO/faster-rcnn/pano_faster_rcnn/src/run_faster_rcnn_inference.py", line 95, in main
    test_model(images_list=image_list)
  File "/home/guillaumedelaboulaye/PR8210PANO/faster-rcnn/pano_faster_rcnn/src/run_faster_rcnn_inference.py", line 48, in test_model
    image_tensor = graph.get_tensor_by_name('image_tensor:0')
  File "/home/guillaumedelaboulaye/PR8210PANO/faster-rcnn/venv/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2733, in get_tensor_by_name
    return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
  File "/home/guillaumedelaboulaye/PR8210PANO/faster-rcnn/venv/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2584, in as_graph_element
    return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
  File "/home/guillaumedelaboulaye/PR8210PANO/faster-rcnn/venv/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2626, in _as_graph_element_locked
    "graph." % (repr(name), repr(op_name)))
KeyError: "The name 'image_tensor:0' refers to a Tensor which does not exist. The operation, 'image_tensor', does not exist in the graph."
In the training graph, the input/output nodes are not given those names. What you will need to do is "export" your trained model via the export_inference_graph.py tool. I believe it currently exports to a frozen graph or a SavedModel, but in future releases it will export to an ordinary checkpoint as well.
If you want sample code for finding the node names of the graph, refer to object_detection_tutorial.ipynb. After the "Load a (frozen) Tensorflow model into memory." block:
for node in od_graph_def.node:
    print node.name
That should list all the node names that you can then enter in the subsequent blocks.
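Once the model has been exported with export_inference_graph.py, a minimal sketch of loading the resulting frozen graph and looking up the canonical tensors (the path below is illustrative, not from the question):
import tensorflow as tf

# Illustrative path; export_inference_graph.py writes this file into the
# output directory you give it.
PATH_TO_FROZEN_GRAPH = 'exported_model/frozen_inference_graph.pb'

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        od_graph_def.ParseFromString(fid.read())
    tf.import_graph_def(od_graph_def, name='')

# The exported graph defines these canonical names:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detected_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')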

Running distributed Tensorflow with InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float

I have implemented a variational autoencoder with TensorFlow on a single machine. Now I am trying to run it on my cluster with the distributed mechanism provided by TensorFlow, but the following problem has had me stuck for several days.
Traceback (most recent call last):
File "/home/yama/mfs/ZhuSuan/examples/vae.py", line 265, in <module>
print('>> Test log likelihood = {}'.format(np.mean(test_lls)))
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/supervisor.py", line 942, in managed_session
self.stop(close_summary_writer=close_summary_writer)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/supervisor.py", line 768, in stop
stop_grace_period_secs=self._stop_grace_secs)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/coordinator.py", line 322, in join
six.reraise(*self._exc_info_to_raise)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/coordinator.py", line 267, in stop_on_exception
yield
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/coordinator.py", line 411, in run
self.run_loop()
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/supervisor.py", line 972, in run_loop
self._sv.global_step])
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 372, in run
run_metadata_ptr)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 636, in _run
feed_dict_string, options, run_metadata)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 708, in _do_run
target_list, options, run_metadata)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 728, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:worker/replica:0/task:0/gpu:0"]()]]
[[Node: model_1/fully_connected_10/Relu_G88 = _Recv[client_terminated=false, recv_device="/job:worker/replica:0/task:0/cpu:0", send_device="/job:worker/replica:0/task:0/gpu:0", send_device_incarnation=3964479821165574552, tensor_name="edge_694_model_1/fully_connected_10/Relu", tensor_type=DT_FLOAT, _device="/job:worker/replica:0/task:0/cpu:0"]()]]
Caused by op u'Placeholder', defined at:
File "/home/yama/mfs/ZhuSuan/examples/vae.py", line 201, in <module>
x = tf.placeholder(tf.float32, shape=(None, x_train.shape[1]))
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 895, in placeholder
name=name)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1238, in _placeholder
name=name)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/ops/op_def_library.py", line 704, in apply_op
op_def=op_def)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2260, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/mfs/yama/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1230, in __init__
self._traceback = _extract_stack()
Here is my code; I just paste the main function for simplicity:
if __name__ == "__main__":
    tf.set_random_seed(1234)
    # Load MNIST
    data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'data', 'mnist.pkl.gz')
    x_train, t_train, x_valid, t_valid, x_test, t_test = \
        dataset.load_mnist_realval(data_path)
    x_train = np.vstack([x_train, x_valid])
    np.random.seed(1234)
    x_test = np.random.binomial(1, x_test, size=x_test.shape).astype('float32')

    # Define hyper-parameters
    n_z = 40

    # Define training/evaluation parameters
    lb_samples = 1
    ll_samples = 5000
    epoches = 10
    batch_size = 100
    test_batch_size = 100
    iters = x_train.shape[0] // batch_size
    test_iters = x_test.shape[0] // test_batch_size
    test_freq = 10

    ps_hosts = FLAGS.ps_hosts.split(",")
    worker_hosts = FLAGS.worker_hosts.split(",")

    # Create a cluster from the parameter server and worker hosts.
    clusterSpec = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})

    print("Create and start a server for the local task.")
    # Create and start a server for the local task.
    server = tf.train.Server(clusterSpec,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index)

    print("Start ps and worker server")
    if FLAGS.job_name == "ps":
        server.join()
    elif FLAGS.job_name == "worker":
        # set distributed device
        with tf.device(tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % FLAGS.task_index,
                cluster=clusterSpec)):
            print("Build the training computation graph")
            # Build the training computation graph
            x = tf.placeholder(tf.float32, shape=(None, x_train.shape[1]))
            optimizer = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=1e-4)
            with tf.variable_scope("model") as scope:
                with pt.defaults_scope(phase=pt.Phase.train):
                    train_model = M1(n_z, x_train.shape[1])
                    train_vz_mean, train_vz_logstd = q_net(x, n_z)
                    train_variational = ReparameterizedNormal(
                        train_vz_mean, train_vz_logstd)
                    grads, lower_bound = advi(
                        train_model, x, train_variational, lb_samples, optimizer)
                    infer = optimizer.apply_gradients(grads)

            print("Build the evaluation computation graph")
            # Build the evaluation computation graph
            with tf.variable_scope("model", reuse=True) as scope:
                with pt.defaults_scope(phase=pt.Phase.test):
                    eval_model = M1(n_z, x_train.shape[1])
                    eval_vz_mean, eval_vz_logstd = q_net(x, n_z)
                    eval_variational = ReparameterizedNormal(
                        eval_vz_mean, eval_vz_logstd)
                    eval_lower_bound = is_loglikelihood(
                        eval_model, x, eval_variational, lb_samples)
                    eval_log_likelihood = is_loglikelihood(
                        eval_model, x, eval_variational, ll_samples)

            global_step = tf.Variable(0)
            saver = tf.train.Saver()
            summary_op = tf.merge_all_summaries()
            init_op = tf.initialize_all_variables()

        # Create a "supervisor", which oversees the training process.
        sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
                                 logdir=LogDir,
                                 init_op=init_op,
                                 summary_op=summary_op,
                                 saver=saver,
                                 global_step=global_step,
                                 save_model_secs=600)

        # Run the inference
        with sv.managed_session(server.target) as sess:
            epoch = 0
            while not sv.should_stop() and epoch < epoches:
                # for epoch in range(1, epoches + 1):
                np.random.shuffle(x_train)
                lbs = []
                for t in range(iters):
                    x_batch = x_train[t * batch_size:(t + 1) * batch_size]
                    x_batch = np.random.binomial(
                        n=1, p=x_batch, size=x_batch.shape).astype('float32')
                    _, lb = sess.run([infer, lower_bound], feed_dict={x: x_batch})
                    lbs.append(lb)
                if epoch % test_freq == 0:
                    test_lbs = []
                    test_lls = []
                    for t in range(test_iters):
                        test_x_batch = x_test[
                            t * test_batch_size: (t + 1) * test_batch_size]
                        test_lb, test_ll = sess.run(
                            [eval_lower_bound, eval_log_likelihood],
                            feed_dict={x: test_x_batch}
                        )
                        test_lbs.append(test_lb)
                        test_lls.append(test_ll)
                    print('>> Test lower bound = {}'.format(np.mean(test_lbs)))
                    print('>> Test log likelihood = {}'.format(np.mean(test_lls)))
        sv.stop()
I have tried to correct my code for several days, but all my efforts have failed. Looking forward to your help!
The most likely cause of this exception is that one of the operations that the tf.train.Supervisor runs in the background depends on the tf.placeholder() tensor x, but doesn't have enough information to feed a value for it.
The most likely culprit is summary_op = tf.merge_all_summaries(), because library code often summarizes values that depend on the training data. To prevent the supervisor from collecting summaries in the background, pass summary_op=None to the tf.train.Supervisor constructor:
# Create a "supervisor", which oversees the training process.
sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
logdir=LogDir,
init_op=init_op,
summary_op=None,
saver=saver,
global_step=global_step,
save_model_secs=600)
After doing this, you will need to make alternative arrangements to collect summaries. The easiest way to do this is to pass summary_op to sess.run() periodically, then pass the result to sv.summary_computed().
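A minimal sketch of that arrangement in the question's training loop, reusing sess, sv, infer, lower_bound, summary_op, and the feed from above:
# Run the summaries together with the training ops so the same feed_dict
# covers the placeholder, then hand the result back to the Supervisor.
_, lb, summary_str = sess.run([infer, lower_bound, summary_op],
                              feed_dict={x: x_batch})
if sv.is_chief:
    sv.summary_computed(sess, summary_str)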
Came across a similar thing: the chief was going down with the aforementioned error message. However, since I was using a MonitoredTrainingSession rather than a self-made Supervisor, I was able to solve the problem by disabling the default summary saver. To disable it, you have to provide
save_summaries_secs=None,
save_summaries_steps=None,
to the constructor of the MonitoredTrainingSession. Afterwards, everything went smoothly!
Code on Github
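A minimal sketch of that constructor change, assuming a distributed setup like the one in the question (server, FLAGS.task_index, and LogDir as above):
# Disable the background summary saver; summaries that depend on fed
# placeholders must then be run and written manually.
with tf.train.MonitoredTrainingSession(
        master=server.target,
        is_chief=(FLAGS.task_index == 0),
        checkpoint_dir=LogDir,
        save_summaries_secs=None,
        save_summaries_steps=None) as sess:
    pass  # training loop as before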
I had the exact same problem. Following mrry's suggestion, I was able to work this out by:
Disabling summary logging in the supervisor by setting summary_op=None (as mrry suggested)
Creating my own summary_op and passing it to sess.run() along with the rest of the ops to be evaluated, holding on to the resulting summary; let's say it's called my_summary
Creating my own summary writer and calling it with my_summary, e.g. summary_writer.add_summary(my_summary, epoch_count)
To clarify, I did not use mrry's suggestion to do sess.run(summary_op) and sv.summary_computed(), but instead ran the summary_op along with the other operations and then wrote out the summary myself. You might also want to condition the summary writing on being the chief.
So basically, you need to bypass the Supervisor's summary-writing services completely. This seems like a surprising limitation/bug of Supervisor, since it isn't exactly uncommon to want to log things that depend on the input (which lives in a placeholder). For example, in my network (an autoencoder) the cost depends on the input.
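Putting those three steps together, a rough sketch in the question's setting (my_summary_op and epoch_count are illustrative names, not from the original code):
# Build your own summary op and writer once, outside the training loop.
my_summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(LogDir)

# Inside the training loop: evaluate the summary op alongside the other
# ops so the placeholder is fed, then write the result out yourself.
_, lb, my_summary = sess.run([infer, lower_bound, my_summary_op],
                             feed_dict={x: x_batch})
if FLAGS.task_index == 0:  # condition the writing on being the chief
    summary_writer.add_summary(my_summary, epoch_count)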