dynamic_rnn() and array_ops.reverse_sequence() problems - tensorflow

I am trying to reverse my inputs with array_ops.reverse_sequence() before sending them to dynamic_rnn(). The inference graph builds with no problem, but when building the training graph I get the following error:
Traceback (most recent call last):
File "bin/trainer.py", line 158, in <module>
kmer_len=args.kmer_len)
File "/home/ubuntu/GIT/IvyMike/ivymike/base_model.py", line 193, in run_training
train_op = model.training(loss, learning_rate)
File "/home/ubuntu/GIT/IvyMike/ivymike/base_model.py", line 100, in training
train_op = optimizer.minimize(loss, global_step=global_step)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 190, in minimize
colocate_gradients_with_ops=colocate_gradients_with_ops)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 241, in compute_gradients
colocate_gradients_with_ops=colocate_gradients_with_ops)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gradients.py", line 481, in gradients
in_grads = _AsList(grad_fn(op, *out_grads))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_grad.py", line 307, in _ReverseSequenceGrad
seq_lengths=seq_lengths),
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 1143, in reverse_sequence
batch_dim=batch_dim, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 655, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2119, in create_op
set_shapes_for_outputs(ret)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1586, in set_shapes_for_outputs
shapes = shape_func(op)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 1257, in _ReverseSequenceShape
(batch_dim, input_shape.ndims))
TypeError: %d format: a number is required, not NoneType
Any idea what went wrong?

This has been fixed at master in TensorFlow.
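For reference (not from the original post), the pattern under discussion looks roughly like the sketch below; backward_dynamic_rnn and the argument names are illustrative only. On builds that predate the fix, taking gradients through reverse_sequence can fail exactly as shown above, so the practical remedy is to update to a TensorFlow build that includes the patch.

import tensorflow as tf

def backward_dynamic_rnn(cell, inputs, seq_lengths):
    # inputs: [batch, time, depth]. Reverse each sequence up to its true length,
    # run the RNN, then reverse the outputs back into the original time order.
    reversed_inputs = tf.reverse_sequence(inputs, seq_lengths, seq_dim=1, batch_dim=0)
    outputs, state = tf.nn.dynamic_rnn(cell, reversed_inputs,
                                       sequence_length=seq_lengths,
                                       dtype=tf.float32)
    outputs = tf.reverse_sequence(outputs, seq_lengths, seq_dim=1, batch_dim=0)
    return outputs, state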

Related

Unable to run tensorflow code

Here is a piece of code I cloned from GitHub, and I am having a hard time getting it to work.
def lstm_doc_enc(input_cnn,
                 batch_size=20,
                 num_rnn_layers=2,
                 rnn_size=650,
                 max_doc_length=35,
                 dropout=0.0):
    # lstm document encoder
    with tf.variable_scope('LSTMenc') as scope:

        def create_rnn_cell():
            cell = tf.contrib.rnn.BasicLSTMCell(rnn_size, state_is_tuple=True,
                                                forget_bias=0.0, reuse=True)
            if dropout > 0.0:
                cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=1.-dropout)
            return cell

        if num_rnn_layers > 1:
            cell = tf.contrib.rnn.MultiRNNCell([create_rnn_cell() for _ in range(num_rnn_layers)],
                                               state_is_tuple=True)
        else:
            cell = create_rnn_cell()

        initial_rnn_state = cell.zero_state(batch_size, dtype=tf.float32)

        input_cnn = tf.reshape(input_cnn, [batch_size, max_doc_length, -1])
        input_cnn2 = [tf.squeeze(x, [1]) for x in tf.split(input_cnn, max_doc_length, 1)]

        outputs, final_rnn_state = tf.contrib.rnn.static_rnn(cell, input_cnn2,
                                                             initial_state=initial_rnn_state,
                                                             dtype=tf.float32)

    return adict(
        initial_enc_state=initial_rnn_state,
        final_enc_state=final_rnn_state,
        enc_outputs=outputs
    )
I cloned it from the NeuralSum repository.
If I leave reuse=True while creating the BasicLSTMCell, I get the following error:
Traceback (most recent call last):
File "pretrain.py", line 358, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "pretrain.py", line 244, in main
train_model = build_model(word_vocab, train=True)
File "pretrain.py", line 146, in build_model
dropout=FLAGS.dropout))
File "/home/raghuram.vadapalli/styletransfer/NeuralSum/model.py", line 218, in lstm_doc_enc
initial_state=initial_rnn_state, dtype=tf.float32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn.py", line 197, in static_rnn
(output, state) = call_cell()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn.py", line 184, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py", line 713, in __call__
output, new_state = self._cell(inputs, state, scope)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py", line 241, in __call__
concat = _linear([inputs, h], 4 * self._num_units, True)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py", line 1044, in _linear
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 1049, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 948, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 356, in get_variable
validate_shape=validate_shape, use_resource=use_resource)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 341, in _true_getter
use_resource=use_resource)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 671, in _get_single_variable
"VarScope?" % name)
ValueError: Variable Model/LSTMenc/rnn/basic_lstm_cell/weights does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
If I don't put reuse=True, I get this other error:
Traceback (most recent call last):
File "pretrain.py", line 358, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "pretrain.py", line 251, in main
valid_model = build_model(word_vocab, train=False)
File "pretrain.py", line 200, in build_model
dropout=FLAGS.dropout))
File "/home/raghuram.vadapalli/styletransfer/NeuralSum/model.py", line 218, in lstm_doc_enc
initial_state=initial_rnn_state, dtype=tf.float32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn.py", line 197, in static_rnn
(output, state) = call_cell()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn.py", line 184, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py", line 235, in __call__
with _checked_scope(self, scope or "basic_lstm_cell", reuse=self._reuse):
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py", line 93, in _checked_scope
"the argument reuse=True." % (scope_name, type(cell).__name__))
ValueError: Attempt to have a second RNNCell use the weights of a variable scope that already has weights: 'Model/LSTMenc/rnn/basic_lstm_cell'; and the cell was not constructed as BasicLSTMCell(..., reuse=True). To share the weights of an RNNCell, simply reuse it in your second calculation, or create a new one with the argument reuse=True.
I don't have a lot of experience with variable scopes in TensorFlow. I googled a lot, but it was of no use. I hope someone understands what is wrong here and can help me.
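For context (this is an illustration, not code from NeuralSum): the two errors describe the standard rule for sharing RNN weights between a training and a validation graph. The first build must create the variables with reuse left unset, and only later builds should be constructed with reuse=True, roughly like this:

import tensorflow as tf

def lstm_cell(rnn_size, reuse=None):
    # reuse=None on the first build (variables get created),
    # reuse=True on later builds (the same variables are shared).
    return tf.contrib.rnn.BasicLSTMCell(rnn_size, state_is_tuple=True,
                                        forget_bias=0.0, reuse=reuse)

with tf.variable_scope('Model') as scope:
    train_cell = lstm_cell(650)              # weights are created when this cell is first used
    # ... build the training graph with train_cell (e.g. via static_rnn) ...
    scope.reuse_variables()                  # switch the scope to reuse mode for later builds
    valid_cell = lstm_cell(650, reuse=True)  # this cell shares the weights created above
    # ... build the validation graph with valid_cell ...

Hard-coding reuse=True in lstm_doc_enc makes even the first (training) build look for weights that do not exist yet, which is the first traceback; dropping it entirely makes the second (validation) build trip over the weights that already exist, which is the second traceback.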

How to calculate auc in tensorflow in an easy way?

sess.run(tf.metrics.auc(labels, preds))
I am trying to use TensorFlow to calculate AUC. My version is 1.0.
But there are some errors.
Can anyone give an example of how to use it?
I have seen some previous questions, but they seem out of date.
Can anyone give working code for the new version?
FailedPreconditionError: Attempting to use uninitialized value auc/false_positives
[[Node: auc/false_positives/read = Identity[T=DT_FLOAT, _class=["loc:@auc/false_positives"], _device="/job:localhost/replica:0/task:0/cpu:0"](auc/false_positives)]]
Caused by op u'auc/false_positives/read', defined at:
File "/home/xuemeng.cyn/anaconda2/bin/ipython", line 6, in <module>
sys.exit(IPython.start_ipython())
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/IPython/__init__.py", line 119, in start_ipython
return launch_new_instance(argv=argv, **kwargs)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/IPython/terminal/ipapp.py", line 348, in start
self.shell.mainloop()
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/IPython/terminal/interactiveshell.py", line 486, in mainloop
self.interact()
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/IPython/terminal/interactiveshell.py", line 477, in interact
self.run_cell(code, store_history=True)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2717, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2827, in run_ast_nodes
if self.run_code(code, result):
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-29-2bca3b61757e>", line 1, in <module>
sess.run(tf.metrics.auc(labels, preds))
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/metrics_impl.py", line 626, in auc
labels, predictions, thresholds, weights)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/metrics_impl.py", line 544, in _confusion_matrix_at_thresholds
false_p = _create_local('false_positives', shape=[num_thresholds])
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/metrics_impl.py", line 196, in _create_local
validate_shape=validate_shape)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 1679, in variable
caching_device=caching_device, name=name, dtype=dtype)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 199, in __init__
expected_shape=expected_shape)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 330, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1400, in identity
result = _op_def_lib.apply_op("Identity", input=input, name=name)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2630, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/xuemeng.cyn/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1204, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value auc/false_positives
[[Node: auc/false_positives/read = Identity[T=DT_FLOAT, _class=["loc:#auc/false_positives"], _device="/job:localhost/replica:0/task:0/cpu:0"](auc/false_positives)]]
The auc function creates local variables (true_positives, true_negatives, false_positives and false_negatives) that are used to compute the AUC, so you need to initialize them:
tf.local_variables_initializer().run()
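A minimal end-to-end sketch (labels and preds are placeholders standing in for your own tensors). tf.metrics.auc returns two ops, the current AUC value and an update op that accumulates the counts, so you run the update op per batch and then read the value:

import numpy as np
import tensorflow as tf

labels = tf.placeholder(tf.float32, [None])
preds = tf.placeholder(tf.float32, [None])

auc_value, auc_update_op = tf.metrics.auc(labels, preds)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())  # initializes auc/false_positives etc.

    # Run the update op on each batch to accumulate the confusion-matrix counts.
    sess.run(auc_update_op, feed_dict={labels: np.array([0., 0., 1., 1.]),
                                       preds: np.array([0.1, 0.4, 0.35, 0.8])})
    print(sess.run(auc_value))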

Tensorflow: The replica master 0 exited with a non-zero status of 1

Each time I try to train and evaluate my TensorFlow model on the Cloud platform, the job terminates midway showing this error.
Given below is the error message I received:
The replica master 0 exited with a non-zero status of 1. Termination reason: Error.
Traceback (most recent call last): [...]
File "/root/.local/lib/python2.7/site-packages/object_detection/evaluator.py", line 132, in evaluate
ignore_groundtruth=eval_config.ignore_groundtruth)
File "/root/.local/lib/python2.7/site-packages/object_detection/evaluator.py", line 51, in _extract_prediction_tensors
input_dict = create_input_dict_fn()
File "/root/.local/lib/python2.7/site-packages/object_detection/builders/input_reader_builder.py", line 61, in build
min_after_dequeue=input_reader_config.min_after_dequeue)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/slim/python/slim/data/parallel_reader.py", line 234, in parallel_read
reader_kwargs=reader_kwargs).read(filename_queue)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/slim/python/slim/data/parallel_reader.py", line 132, in read
enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/io_ops.py", line 191, in read
return gen_io_ops._reader_read_v2(self._reader_ref, queue_ref, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_io_ops.py", line 410, in _reader_read_v2
queue_handle=queue_handle, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2327, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1226, in __init__
self._traceback = _extract_stack()
UnimplementedError (see above for traceback): File system scheme sgs not implemented
[[Node: parallel_read/ReaderReadV2 = ReaderReadV2[_device="/job:localhost/replica:0/task:0/cpu:0"](parallel_read/TFRecordReaderV2, parallel_read/filenames)]]

im2txt UnimplementedError (see above for traceback): TensorArray has size zero when run Training when changing new data

I got an error when I switched to new images to train the im2txt model. I don't know why.
Build the model and start training:
bazel build -c opt im2txt/...
bazel-bin/im2txt/train \
  --input_file_pattern="${MY_DATA_DIR}/train-?????-of-00256" \
  --inception_checkpoint_file="${INCEPTION_CHECKPOINT}" \
  --train_dir="${MODEL_DIR}/train" \
  --train_inception=false \
  --number_of_steps=10000
The error occurs when running the statement below:
sequence_length = tf.reduce_sum(self.input_mask, 1)
lstm_outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell,
                                    inputs=self.seq_embeddings,
                                    sequence_length=sequence_length,
                                    initial_state=initial_state,
                                    dtype=tf.float32,
                                    scope=lstm_scope)
The detailed log is below:
INFO:tensorflow:global_step/sec: 0
INFO:tensorflow:global step 1: loss = 9.5415 (37.21 sec/step)
INFO:tensorflow:global step 2: loss = 6.6332 (12.90 sec/step)
INFO:tensorflow:global step 3: loss = 3.1327 (13.01 sec/step)
INFO:tensorflow:global step 4: loss = 6.2893 (12.04 sec/step)
INFO:tensorflow:Error reported to Coordinator: <class 'tensorflow.python.framework.errors_impl.UnimplementedError'>, TensorArray has size zero, but element shape is not fully defined. Currently only static shapes are supported when packing zero-size TensorArrays.
[[Node: OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGatherV3 = TensorArrayGatherV3[_class=["loc:#lstm/lstm/TensorArray_1"], dtype=DT_FLOAT, element_shape=, _device="/job:localhost/replica:0/task:0/cpu:0"](OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGrad/TensorArrayGradV3, lstm/lstm/TensorArrayUnstack/range, OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGrad/gradient_flow)]]
Caused by op u'OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGatherV3', defined at:
File "/data/projects/content_creator/image2text/im2txt/bazel-bin/im2txt/train.runfiles/im2txt/im2txt/train.py", line 155, in
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 44, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/data/projects/content_creator/image2text/im2txt/bazel-bin/im2txt/train.runfiles/im2txt/im2txt/train.py", line 135, in main
learning_rate_decay_fn=learning_rate_decay_fn)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/optimizers.py", line 226, in optimize_loss
colocate_gradients_with_ops=colocate_gradients_with_ops)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 345, in compute_gradients
colocate_gradients_with_ops=colocate_gradients_with_ops)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gradients_impl.py", line 482, in gradients
in_grads = grad_fn(op, *out_grads)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/tensor_array_grad.py", line 186, in _TensorArrayScatterGrad
grad = g.gather(indices)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/tensor_array_ops.py", line 328, in gather
element_shape=element_shape)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 2226, in _tensor_array_gather_v3
element_shape=element_shape, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2327, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1226, in init
self._traceback = _extract_stack()
...which was originally created as op u'lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3', defined at:
File "/data/projects/content_creator/image2text/im2txt/bazel-bin/im2txt/train.runfiles/im2txt/im2txt/train.py", line 155, in
tf.app.run()
[elided 0 identical lines from previous traceback]
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 44, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/data/projects/content_creator/image2text/im2txt/bazel-bin/im2txt/train.runfiles/im2txt/im2txt/train.py", line 89, in main
model.build()
File "/data/projects/content_creator/image2text/im2txt/im2txt/show_and_tell_model.py", line 437, in build
self.build_model()
File "/data/projects/content_creator/image2text/im2txt/im2txt/show_and_tell_model.py", line 356, in build_model
scope=lstm_scope)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 546, in dynamic_rnn
dtype=dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 664, in dynamic_rnn_loop
for ta, input in zip(input_ta, flat_input))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 664, in
for ta, input in zip(input_ta, flat_input))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/tensor_array_ops.py", line 380, in unstack
indices=math_ops.range(0, num_elements), value=value, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/tensor_array_ops.py", line 408, in scatter
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 2492, in _tensor_array_scatter_v3
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
op_def=op_def)
UnimplementedError (see above for traceback): TensorArray has size zero, but element shape is not fully defined. Currently only static shapes are supported when packing zero-size TensorArrays.
[[Node: OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGatherV3 = TensorArrayGatherV3[_class=["loc:#lstm/lstm/TensorArray_1"], dtype=DT_FLOAT, element_shape=, _device="/job:localhost/replica:0/task:0/cpu:0"](OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGrad/TensorArrayGradV3, lstm/lstm/TensorArrayUnstack/range, OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGrad/gradient_flow)]]
Traceback (most recent call last):
File "/data/projects/content_creator/image2text/im2txt/bazel-bin/im2txt/train.runfiles/im2txt/im2txt/train.py", line 155, in
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 44, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/data/projects/content_creator/image2text/im2txt/bazel-bin/im2txt/train.runfiles/im2txt/im2txt/train.py", line 152, in main
saver=saver)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/slim/python/slim/learning.py", line 793, in train
train_step_kwargs)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/slim/python/slim/learning.py", line 530, in train_step
run_metadata=run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 965, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1015, in _do_run
target_list, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1035, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.UnimplementedError: TensorArray has size zero, but element shape is not fully defined. Currently only static shapes are supported when packing zero-size TensorArrays.
[[Node: OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGatherV3 = TensorArrayGatherV3[_class=["loc:#lstm/lstm/TensorArray_1"], dtype=DT_FLOAT, element_shape=, _device="/job:localhost/replica:0/task:0/cpu:0"](OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGrad/TensorArrayGradV3, lstm/lstm/TensorArrayUnstack/range, OptimizeLoss/gradients/lstm/lstm/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3_grad/TensorArrayGrad/gradient_flow)]]
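Not part of the original post, but since the error reports a zero-size TensorArray inside dynamic_rnn, i.e. a batch element whose time dimension is empty after preprocessing, one thing worth checking with freshly generated data is whether any records have empty or one-token captions (im2txt shifts the caption to build input/target sequences, so anything shorter than two tokens yields an empty sequence). A minimal sketch, assuming the standard im2txt SequenceExample layout with an "image/caption_ids" feature list and a hypothetical shard path:

import tensorflow as tf

files = tf.gfile.Glob("/path/to/train-?????-of-00256")  # point at the newly built shards

bad = 0
for path in files:
    for record in tf.python_io.tf_record_iterator(path):
        ex = tf.train.SequenceExample.FromString(record)
        caption = ex.feature_lists.feature_list["image/caption_ids"].feature
        if len(caption) < 2:  # too short to form an (input, target) pair
            bad += 1
print("records with too-short captions: %d" % bad)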

Adding to vocab during Tensorflow seq2seq run

I am using the Tensorflow seq2seq tutorial to play with machine translation. Say I have trained the model for some time and determine that I want to supplement the original vocab with new words to enhance the quality of the model. Is there a way to pause training, add words to the vocabulary, and then resume training from the most recent checkpoint? I attempted to do so but when I began training again I got this error:
Traceback (most recent call last):
File "execute.py", line 405, in <module>
train()
File "execute.py", line 127, in train
model = create_model(sess, False)
File "execute.py", line 108, in create_model
model.saver.restore(session, ckpt.model_checkpoint_path)
File "/home/jrthom18/.local/lib/python2.7/site- packages/tensorflow/python/training/saver.py", line 1388, in restore
{self.saver_def.filename_tensor_name: save_path})
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 766, in run
run_metadata_ptr)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 964, in _run
feed_dict_string, options, run_metadata)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1014, in _do_run
target_list, options, run_metadata)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1034, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Assign requires shapes of both tensors to match. lhs shape= [384633] rhs shape= [384617]
[[Node: save/Assign_82 = Assign[T=DT_FLOAT, _class=["loc:#proj_b"], use_locking=true, validate_shape=true, _device="/job:localhost/replica:0/task:0/cpu:0"](proj_b, save/RestoreV2_82)]]
Caused by op u'save/Assign_82', defined at:
File "execute.py", line 405, in <module>
train()
File "execute.py", line 127, in train
model = create_model(sess, False)
File "execute.py", line 99, in create_model
model = seq2seq_model.Seq2SeqModel( gConfig['enc_vocab_size'], gConfig['dec_vocab_size'], _buckets, gConfig['layer_size'], gConfig['num_layers'], gConfig['max_gradient_norm'], gConfig['batch_size'], gConfig['learning_rate'], gConfig['learning_rate_decay_factor'], forward_only=forward_only)
File "/home/jrthom18/data/3x256_bs32/easy_seq2seq/seq2seq_model.py", line 166, in __init__
self.saver = tf.train.Saver(tf.global_variables(), keep_checkpoint_every_n_hours=2.0)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 1000, in __init__
self.build()
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 1030, in build
restore_sequentially=self._restore_sequentially)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 624, in build
restore_sequentially, reshape)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 373, in _AddRestoreOps
assign_ops.append(saveable.restore(tensors, shapes))
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 130, in restore
self.op.get_shape().is_fully_defined())
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/ops/gen_state_ops.py", line 47, in assign
use_locking=use_locking, name=name)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 759, in apply_op
op_def=op_def)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2240, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/jrthom18/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1128, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): Assign requires shapes of both tensors to match. lhs shape= [384633] rhs shape= [384617]
[[Node: save/Assign_82 = Assign[T=DT_FLOAT, _class=["loc:#proj_b"], use_locking=true, validate_shape=true, _device="/job:localhost/replica:0/task:0/cpu:0"](proj_b, save/RestoreV2_82)]]
Obviously the new vocab is larger and so the tensor sizes do not match. Is there some way around this?
You cannot update your vocab once it is set, but you can always use a shared wordpiece model. It will let you copy out-of-vocabulary words directly from the source to the target output.
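To illustrate the idea (a toy example, not code from the answer): with a subword/wordpiece vocabulary the inventory of pieces is fixed, and a word that was never seen whole is still representable as a sequence of known pieces, so the vocabulary never needs to grow mid-training. A greedy longest-match segmenter over a made-up piece inventory:

def wordpiece_segment(word, pieces):
    # Greedy longest-match segmentation of a word into known pieces.
    out, i = [], 0
    while i < len(word):
        for j in range(len(word), i, -1):        # try the longest candidate first
            piece = word[i:j] if i == 0 else "##" + word[i:j]
            if piece in pieces:
                out.append(piece)
                i = j
                break
        else:
            return ["<unk>"]                     # no piece matched at position i
    return out

# Even an unseen word decomposes into pieces already in the fixed vocabulary.
vocab = {"un", "##trans", "##lat", "##able", "##s"}
print(wordpiece_segment("untranslatable", vocab))  # ['un', '##trans', '##lat', '##able']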