InvalidArgumentError (see above for traceback): slice index 15 of dimension 0 out of bounds - tensorflow

In a task to implement minimum risk training for a neural machine translation system, I need to sample sentences and gather the corresponding logits for the sampled word IDs. The gathering step looks like this:
for i in range(1, self._num_of_samples):
    logits, _, _, sampled_ids = self.decoder._decoding_loop(train_mode=False, sample=True)
    ind = [[[tf.constant(i), tf.constant(j), sampled_ids[i][j]]
            for j in range(self.batch_size)]
           for i in range(self.decoder.max_output_len)]
    gathered_logits = tf.gather_nd(logits, ind)
    sentence_sum_logit = tf.reduce_sum(gathered_logits, 0)
    self.sample_sen_ids = self.sample_sen_ids.write(steps[i], sampled_ids)
    self.sample_logits = self.sample_logits.write(steps[i], sentence_sum_logit)
self.sample_sen_ids = tf.transpose(self.sample_sen_ids.stack())
self.sample_logits = tf.transpose(self.sample_logits.stack())
But I don't understand why, after some batches, I get this:
Traceback (most recent call last):
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 1022, in _do_call
return fn(*args)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 1004, in _run_fn
status, run_metadata)
File "/usr/lib/python3.5/contextlib.py", line 66, in __exit__
next(self.gen)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py", line 466, in raise_exception_on_not_ok_status
pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: slice index 49 of dimension 0 out of bounds.
[[Node: sampling/strided_slice_4900 = StridedSlice[Index=DT_INT32, T=DT_INT32, begin_mask=0, ellipsis_mask=0, end_mask=0, new_axis_mask=0, shrink_axis_mask=1, _device="/job:localhost/replica:0/task:0/cpu:0"](sampling/TensorArrayStack_3/TensorArrayGatherV3, sampling/strided_slice_4900/stack, sampling/strided_slice_4900/stack_1, sampling/strided_slice_4900/stack_2)]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "bin/neuralmonkey-train", line 6, in <module>
main()
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/train.py", line 211, in main
initial_variables=cfg.model.initial_variables)
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/learning_utils.py", line 185, in training_loop
results, meta=tf_manager.execute(batch_dataset, [trainer],train=True, summaries=False)
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/tf_manager.py", line 217, in execute
for sess in self.sessions]
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/tf_manager.py", line 217, in <listcomp>
for sess in self.sessions]
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 965, in _run
feed_dict_string, options, run_metadata)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 1015, in _do_run
target_list, options, run_metadata)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 1035, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: slice index 49 of dimension 0 out of bounds.
[[Node: sampling/strided_slice_4900 = StridedSlice[Index=DT_INT32, T=DT_INT32, begin_mask=0, ellipsis_mask=0, end_mask=0, new_axis_mask=0, shrink_axis_mask=1, _device="/job:localhost/replica:0/task:0/cpu:0"](sampling/TensorArrayStack_3/TensorArrayGatherV3, sampling/strided_slice_4900/stack, sampling/strided_slice_4900/stack_1, sampling/strided_slice_4900/stack_2)]]
Caused by op 'sampling/strided_slice_4900', defined at:
File "bin/neuralmonkey-train", line 6, in <module>
main()
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/train.py", line 170, in main
cfg.build_model(warn_unused=True)
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/config/configuration.py", line 86, in build_model
model = build_config(self.config_dict, self.ignored, warn_unused)
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/config/builder.py", line 198, in build_config
value, config_dicts, existing_objects, 0)
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/config/builder.py", line 109, in build_object
obj = instantiate_class(value[7:], all_dicts, existing_objects, depth)
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/config/builder.py", line 165, in instantiate_class
obj = clazz(*bounded_params.args, **bounded_params.kwargs)
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/trainers/mrt_trainer.py", line 80, in __init__
ind=[[[tf.constant(i),tf.constant(j),sampled_ids[i][j]] for j in range(self.batch_size)] for i in range(self.decoder.max_output_len)]
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/trainers/mrt_trainer.py", line 80, in <listcomp>
ind=[[[tf.constant(i),tf.constant(j),sampled_ids[i][j]] for j in range(self.batch_size)] for i in range(self.decoder.max_output_len)]
File "/home/stoyan/neuralmonkey/bin/neuralmonkey/trainers/mrt_trainer.py", line 80, in <listcomp>
ind=[[[tf.constant(i),tf.constant(j),sampled_ids[i][j]] for j in range(self.batch_size)] for i in range(self.decoder.max_output_len)]
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/ops/array_ops.py", line 495, in _SliceHelper
name=name)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/ops/array_ops.py", line 653, in strided_slice
shrink_axis_mask=shrink_axis_mask)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/ops/gen_array_ops.py", line 3688, in strided_slice
shrink_axis_mask=shrink_axis_mask, name=name)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
op_def=op_def)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2327, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/stoyan/neurmon/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 1226, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): slice index 49 of dimension 0 out of bounds.
[[Node: sampling/strided_slice_4900 = StridedSlice[Index=DT_INT32, T=DT_INT32, begin_mask=0, ellipsis_mask=0, end_mask=0, new_axis_mask=0, shrink_axis_mask=1, _device="/job:localhost/replica:0/task:0/cpu:0"](sampling/TensorArrayStack_3/TensorArrayGatherV3, sampling/strided_slice_4900/stack, sampling/strided_slice_4900/stack_1, sampling/strided_slice_4900/stack_2)]]
What does this InvalidArgumentError refer to, and what is going wrong?
Best,
Stoyan

According to the stack trace, the error comes from this expression in your code:
sampled_ids[i][j]
...but it's hard to tell without context whether it comes from taking the [i] slice or the [j] slice. Presumably one of the tensors in this structure has fewer than 15 (or 49 in the error message) elements in the 0th dimension. Often this can happen if your input data includes word IDs that are not present in the vocabulary used for training the model.
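If it is the [i] slice over the time dimension, one way to rule the indexing out (a sketch only, assuming logits has shape [max_output_len, batch, vocab] and sampled_ids has shape [max_output_len, batch], which is what the comprehension implies) is to build the gather indices from the tensors' dynamic shape with tensor ops instead of Python range(...) over fixed sizes, so a shorter decoded sequence or a smaller final batch cannot index past the end:
import tensorflow as tf

def gather_sampled_logits(logits, sampled_ids):
    # Hypothetical helper, not part of Neural Monkey.
    sampled_ids = tf.cast(sampled_ids, tf.int32)
    shape = tf.shape(sampled_ids)                                # dynamic [time, batch]
    time_idx, batch_idx = tf.meshgrid(tf.range(shape[0]),
                                      tf.range(shape[1]),
                                      indexing='ij')
    ind = tf.stack([time_idx, batch_idx, sampled_ids], axis=-1)  # [time, batch, 3]
    gathered = tf.gather_nd(logits, ind)                         # [time, batch]
    return tf.reduce_sum(gathered, axis=0)                       # sum over time -> [batch]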

Related

tensorflow v1 GradientTape: AttributeError: 'NoneType' object has no attribute 'eval'

I want to compute the gradient of the distance between the NSynth WaveNet encodings of two sine waves.
This is TensorFlow v1.
I am working with code based on https://github.com/magenta/magenta/blob/master/magenta/models/nsynth/wavenet/fastgen.py
A minimal example of my bug is in this colab notebook: https://colab.research.google.com/drive/1oTEU8QAaOs0K1A0KHrAdt7kA7MkadNDr?usp=sharing
Here is the code:
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 1.x
!pip3 install -q magenta
!wget -c http://download.magenta.tensorflow.org/models/nsynth/wavenet-ckpt.tar && tar xvf wavenet-ckpt.tar
checkpoint_path = './wavenet-ckpt/model.ckpt-200000'
import math
from magenta.models.nsynth.wavenet import fastgen
import tensorflow as tf
session_config = tf.ConfigProto(allow_soft_placement=True)
session_config.gpu_options.allow_growth = True
sess = tf.Session(config=session_config)
pi = 3.1415926535897
SR = 16000
sample_length = 64000
DURATION_SECONDS = sample_length / SR
def sine(hz):
    time = tf.linspace(0.0, DURATION_SECONDS, sample_length)
    return tf.constant(0.5) * tf.cos(2.0 * pi * time * hz)
net = fastgen.load_nsynth(batch_size=2, sample_length=sample_length)
saver = tf.train.Saver()
saver.restore(sess, checkpoint_path)
"""We have two sine waves at 440 and 660 Hz. We use the encoder to generate two (125, 16) encodings:"""
twosines = tf.stack([sine(440), sine(660)]).eval(session=sess)
print(sess.run(net["encoding"], feed_dict={net["X"]: twosines}).shape)
"""Compute the distance between the two sine waves"""
distencode = tf.reduce_mean(tf.abs(net["encoding"][0] - net["encoding"][1]))
print(sess.run(distencode, feed_dict={net["X"]: twosines}))
"""I don't know why the following code doesn't work, but if I did I could solve the real task....
"""
net["X"] = twosines
distencode.eval(session=sess)
"""Here is the code that I need to work. I want to compute the gradient of the distance between the NSynth encoding of two sine waves:"""
fp = tf.constant(660.0)
newsines = tf.stack([sine(440), sine(fp)])
with tf.GradientTape() as g:
    g.watch(fp)
    dd_dfp = g.gradient(distencode, fp)
print(dd_dfp.eval(session=sess))
The last block, which I want to evaluate, gets the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-12-b5b8cdd00b24> in <module>()
4 g.watch(fp)
5 dd_dfp = g.gradient(distencode, fp)
----> 6 print(dd_dfp.eval(session=sess))
AttributeError: 'NoneType' object has no attribute 'eval'
I believe I need to define the operations to be executed within this block. However, I am using a pretrained model that I am just computing the distance over, so I am not sure how to define execution in that block.
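For reference, since distencode is computed from the placeholder net["X"], it has no dependence on fp in the graph, so GradientTape has nothing to differentiate and returns None. In TF 1.x graph mode the gradient is usually defined with tf.gradients over tensors that are actually connected; here is a minimal, self-contained sketch of just that mechanism (it deliberately leaves out the NSynth net, so it only illustrates the mechanism, not the real task):
import math
import tensorflow as tf  # TF 1.x, as in the notebook

SR = 16000
sample_length = 64000
DURATION_SECONDS = sample_length / SR

def sine(hz):
    time = tf.linspace(0.0, DURATION_SECONDS, sample_length)
    return 0.5 * tf.cos(2.0 * math.pi * time * hz)

fp = tf.constant(660.0)
# The distance is built directly from tensors that depend on fp, so the graph
# contains a differentiable path from fp to the loss.
dist = tf.reduce_mean(tf.abs(sine(440.0) - sine(fp)))
dd_dfp = tf.gradients(dist, fp)[0]  # would be None if no such path existed

with tf.Session() as sess:
    print(sess.run([dist, dd_dfp]))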
The second-to-last block, which would help me fix the last block, gives the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-10-c3411dcbfa2c> in <module>()
3 with tf.GradientTape() as g:
4 g.watch(fp)
----> 5 dd_dfp = g.gradient(distencode, g)
6 print(dd_dfp.eval(session=sess))
/tensorflow-1.15.2/python3.6/tensorflow_core/python/eager/backprop.py in gradient(self, target, sources, output_gradients, unconnected_gradients)
997 flat_sources = [_handle_or_self(x) for x in flat_sources]
998 for t in flat_sources_raw:
--> 999 if not t.dtype.is_floating:
1000 logging.vlog(
1001 logging.WARN, "The dtype of the source tensor must be "
AttributeError: 'GradientTape' object has no attribute 'dtype'
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/tensorflow-1.15.2/python3.6/tensorflow_core/python/client/session.py in _do_call(self, fn, *args)
1364 try:
-> 1365 return fn(*args)
1366 except errors.OpError as e:
8 frames
InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[{{node Placeholder}}]]
[[Mean/_759]]
(1) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[{{node Placeholder}}]]
0 successful operations.
0 derived errors ignored.
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
/tensorflow-1.15.2/python3.6/tensorflow_core/python/client/session.py in _do_call(self, fn, *args)
1382 '\nsession_config.graph_options.rewrite_options.'
1383 'disable_meta_optimizer = True')
-> 1384 raise type(e)(node_def, op, message)
1385
1386 def _extend_graph(self):
InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[node Placeholder (defined at /tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py:1748) ]]
[[Mean/_759]]
(1) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[node Placeholder (defined at /tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py:1748) ]]
0 successful operations.
0 derived errors ignored.
Original stack trace for 'Placeholder':
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py", line 664, in launch_instance
app.start()
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelapp.py", line 499, in start
self.io_loop.start()
File "/usr/local/lib/python3.6/dist-packages/tornado/platform/asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.6/asyncio/base_events.py", line 438, in run_forever
self._run_once()
File "/usr/lib/python3.6/asyncio/base_events.py", line 1451, in _run_once
handle._run()
File "/usr/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/usr/local/lib/python3.6/dist-packages/tornado/ioloop.py", line 758, in _run_callback
ret = callback()
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 548, in <lambda>
self.io_loop.add_callback(lambda : self._handle_events(self.socket, 0))
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 462, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 492, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 444, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-5-5120c8282e75>", line 1, in <module>
net = fastgen.load_nsynth(batch_size=2, sample_length=sample_length)
File "/tensorflow-1.15.2/python3.6/magenta/models/nsynth/wavenet/fastgen.py", line 64, in load_nsynth
x = tf.placeholder(tf.float32, shape=[batch_size, sample_length])
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/array_ops.py", line 2619, in placeholder
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/gen_array_ops.py", line 6669, in placeholder
"Placeholder", dtype=dtype, shape=shape, name=name)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/op_def_library.py", line 794, in _apply_op_helper
op_def=op_def)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py", line 3357, in create_op
attrs, op_def, compute_device)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py", line 3426, in _create_op_internal
op_def=op_def)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
Thank you.

Tensorflow error calling model.fit after load_model

I'm training a small, simple neural net for a basic problem of regulating a motor's speed. I want to be able to save the model and exit the program, then load it later and resume training.
Here's the relevant code:
self.model = Sequential()
self.model.add(InputLayer(2))
self.model.add(Dense(6, activation='relu'))
self.model.add(Dense(9, activation='linear'))
self.model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# ... Loop for training and Evaluation (Deep Q Learner) ...
learn(self.model)
self.model.save('motor_model', save_format='tf')
Now, after it's trained, I want to be able to load the model and continue training:
self.model = models.load_model('motor_model', compile=False)
# ... Loop for training and Evaluation (Deep Q Learner) ...
learn(self.model)
The first time I run the model it works fine. However, after saving and loading the model it does not. Upon loading the model I am able to call the predict function:
prediction = self.model.predict(currentInput)
However, it fails when I call the fit function:
self.model.fit(self.input, target_vec.reshape(-1, 9), epochs=1, verbose=0)
The error I get is:
2019-12-07 07:22:00.762174: W tensorflow/c/c_api.cc:326] Operation '{name:'sequential/dense/StatefulPartitionedCall' id:33 op device:{} def:{{{node sequential/dense/StatefulPartitionedCall}} = StatefulPartitionedCall[Tin=[DT_FLOAT, DT_RESOURCE, DT_RESOURCE], Tout=[DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT], _gradient_op_type="PartitionedCall-298", config="", config_proto="\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0002\002J\0008\001", executor_type="", f=__forward_restored_function_body_509[]](input_1, dense/kernel, dense/bias)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
2019-12-07 07:22:03.320478: W tensorflow/python/util/util.cc:299] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1363, in _do_call
return fn(*args)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1346, in _run_fn
self._extend_graph()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1386, in _extend_graph
tf_session.ExtendSession(self._session)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "ct2.py", line 47, in <module>
leftController.to_position(target, overrideAction)
File "/opt/mowzr/motor_controller.py", line 94, in to_position
self.model.fit(self.prevInput, target_vec.reshape(-1, 9), epochs=1, verbose=0)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 766, in fit
use_multiprocessing=use_multiprocessing)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_arrays.py", line 680, in fit
steps_name='steps_per_epoch')
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_arrays.py", line 275, in model_iteration
model.reset_metrics()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 953, in reset_metrics
m.reset_states()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/metrics.py", line 209, in reset_states
K.batch_set_value([(v, 0) for v in self.variables])
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 3343, in batch_set_value
get_session().run(assign_ops, feed_dict=feed_dict)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 490, in get_session
_initialize_variables(session)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 905, in _initialize_variables
[variables_module.is_variable_initialized(v) for v in candidate_vars])
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 956, in run
run_metadata_ptr)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1179, in _run
feed_dict_tensor, options, run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1357, in _do_run
run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1382, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1363, in _do_call
return fn(*args)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1346, in _run_fn
self._extend_graph()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1386, in _extend_graph
tf_session.ExtendSession(self._session)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "ct2.py", line 53, in <module>
leftController.saveModel()
File "/opt/mowzr/motor_controller.py", line 116, in saveModel
self.model.save('motor_model', save_format='tf')
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/network.py", line 986, in save
signatures, options)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/saving/save.py", line 115, in save_model
signatures, options)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/saving/saved_model/save.py", line 74, in save
save_lib.save(model, filepath, signatures, options)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/saved_model/save.py", line 924, in save
object_saver.save(utils_impl.get_variables_path(export_dir))
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/tracking/util.py", line 1161, in save
session = get_session()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/tracking/util.py", line 71, in get_session
session = keras_backend.get_session()
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 490, in get_session
_initialize_variables(session)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 905, in _initialize_variables
[variables_module.is_variable_initialized(v) for v in candidate_vars])
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 956, in run
run_metadata_ptr)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1179, in _run
feed_dict_tensor, options, run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1357, in _do_run
run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1382, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.
I got the same error.
I don't know exactly what produces this error, but there is a way to work around it (not a pretty one, though): create the model with the same architecture and set its weights from the loaded model's weights:
self.model = self.create_model()
self.model.set_weights(load_model("sample.model").get_weights())
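A slightly fuller sketch of that workaround, adapted to the model from the question (create_model is a hypothetical helper, and tf.keras is assumed since the model was saved with save_format='tf'); note that it discards the saved optimizer state, so Adam starts fresh when training resumes:
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import InputLayer, Dense

def create_model():
    # Same architecture as the model that was originally trained and saved.
    model = Sequential()
    model.add(InputLayer(input_shape=(2,)))
    model.add(Dense(6, activation='relu'))
    model.add(Dense(9, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    return model

# Build a fresh model, then copy only the weights from the saved one; the
# freshly compiled model gets a clean training/gradient graph instead of the
# restored StatefulPartitionedCall ops that fit() trips over.
model = create_model()
model.set_weights(load_model('motor_model', compile=False).get_weights())
# Training can then resume, e.g. model.fit(x, y, epochs=1, verbose=0)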

no kernel image is available for execution on the device

I am training Mask R-CNN. With tf-1.2 it trains fine, but with tf-1.5 it does not train.
The error is as follows:
Caused by op u'pyramid_1/AssignGTBoxes/Where_6', defined at:
File "/home/zhouzd2/letrain/applications/letrain.py", line 349, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 124, in run
_sys.exit(main(argv))
File "/home/zhouzd2/letrain/applications/letrain.py", line 346, in main
LeTrain().model_train(user_mode)
File "/home/zhouzd2/letrain/platform/base_train.py", line 1228, in model_train
cluster=self.cluster_spec)
File "/home/zhouzd2/letrain/platform/deployment/model_deploy.py", line 226, in create_clones
outputs, feed_ops,verify_model_loss = model_fn(*args, **kwargs)
File "/home/zhouzd2/letrain/platform/base_train.py", line 1195, in clone_fn
model_loss, end_points, feed_ops = network_fn(data_direct, data_batch, int_network_fn)
File "/home/zhouzd2/letrain/applications/letrain.py", line 214, in get_loss
FLAGS.batch_size)
File "/home/zhouzd2/letrain/applications/fmrcnn/get_fmrcnn_loss.py", line 23, in model_fn
loss_weights=[0.2, 0.2, 1.0, 0.2, 1.0])
File "/home/zhouzd2/letrain/applications/fmrcnn/libs/nets/pyramid_network.py", line 580, in build
is_training=is_training, gt_boxes=gt_boxes)
File "/home/zhouzd2/letrain/applications/fmrcnn/libs/nets/pyramid_network.py", line 263, in build_heads
assign_boxes(rois, [rois, batch_inds], [2, 3, 4, 5])
File "/home/zhouzd2/letrain/applications/fmrcnn/libs/layers/wrapper.py", line 173, in assign_boxes
inds = tf.where(tf.equal(assigned_layers, l))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 2538, in where
return gen_array_ops.where(condition=condition, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 6087, in where
"Where", input=condition, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 3160, in create_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1625, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InternalError (see above for traceback): WhereOp: Could not launch cub::DeviceReduce::Sum to count number of true / nonzero indices. temp_storage_bytes: 1, status: no kernel image is available for execution on the device
[[Node: pyramid_1/AssignGTBoxes/Where_6 = Where[T=DT_BOOL, _device="/job:worker/replica:0/task:0/device:GPU:0"](pyramid_1/AssignGTBoxes/Equal_6_S9493)]]
[[Node: pyramid_1/AssignGTBoxes/Reshape_8_G1028 = _Recv[client_terminated=false, recv_device="/job:worker/replica:0/task:0/device:CPU:0", send_device="/job:worker/replica:0/task:0/device:GPU:0", send_device_incarnation=5407481677180697062, tensor_name="edge_1349_pyramid_1/AssignGTBoxes/Reshape_8", tensor_type=DT_INT64, _device="/job:worker/replica:0/task:0/device:CPU:0"]()]]
There is no problem when loading the computation graph; the error is reported in sess.run().
Does anyone know how to solve this problem? Or does anyone know what function can replace tf.where?
Thank you!
If you are using Visual Studio:
Right-click on the project > Properties > CUDA C/C++ > Device
and add the following to the Code Generation field:
compute_30,sm_30;compute_35,sm_35;compute_37,sm_37;compute_50,sm_50;compute_52,sm_52;compute_60,sm_60;compute_61,sm_61;compute_70,sm_70;compute_75,sm_75;
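On Linux, where the prebuilt pip wheel is used instead of Visual Studio, a useful first step is to check which compute capability the GPU reports, since a prebuilt TensorFlow binary only ships GPU kernels for a fixed set of compute capabilities and a GPU outside that set produces exactly this "no kernel image is available" error. A small check, assuming TF 1.x:
from tensorflow.python.client import device_lib

for dev in device_lib.list_local_devices():
    if dev.device_type == 'GPU':
        # physical_device_desc ends with something like "compute capability: 3.0"
        print(dev.name, '->', dev.physical_device_desc)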

Tensorflow error "feed a value for placeholder tensor" occurs on the second RBM in a deep belief net

I first constructed an RBM and tested it on a set of data, and it worked well. Then I wrote a DBN with stacked RBMs and trained it on the same data. The program stopped with the following error when it tried to train the second RBM.
Traceback (most recent call last):
File "D:\Python\DL_DG\analysis\debug\debug_01_ppi.py", line 44, in <module>
ppi_dbn.fit(ppi_in)
File "D:/Python/DL_DG/Model\dbn_test.py", line 95, in fit
rbm.fit(input_data)
File "D:/Python/DL_DG/Model\rbm_test.py", line 295, in fit
self.partial_fit(batch_x, b, e)
File "D:/Python/DL_DG/Model\rbm_test.py", line 188, in partial_fit
feed_dict={self.x: batch_x})
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 895, in run
run_metadata_ptr)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1124, in _run
feed_dict_tensor, options, run_metadata)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1321, in _do_run
options, run_metadata)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1340, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'input/x' with dtype float and shape [?,128]
[[Node: input/x = Placeholder[dtype=DT_FLOAT, shape=[?,128], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'input/x', defined at:
File "<string>", line 1, in <module>
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\idlelib\run.py", line 142, in main
ret = method(*args, **kwargs)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\idlelib\run.py", line 460, in runcode
exec(code, self.locals)
File "D:\Python\DL_DG\analysis\debug\debug_01_ppi.py", line 42, in <module>
learning_rate_rbm=[0.001,0.01],rbm_gauss_visible=True)
File "D:/Python/DL_DG/Model\dbn_test.py", line 52, in __init__
sample_gauss_visible=self.sample_gauss_visible, sigma=self.sigma))
File "D:/Python/DL_DG/Model\rbm_test.py", line 358, in __init__
xavier_const,err_function,use_tqdm,tqdm)
File "D:/Python/DL_DG/Model\rbm_test.py", line 46, in __init__
self.x = tf.placeholder(tf.float32, [None, self.n_visible],name='x')
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\array_ops.py", line 1548, in placeholder
return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 2094, in _placeholder
name=name)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 767, in apply_op
op_def=op_def)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 2630, in create_op
original_op=self._default_original_op, op_def=op_def)
File "C:\Users\pil562\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 1204, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'input/x' with dtype float and shape [?,128]
[[Node: input/x = Placeholder[dtype=DT_FLOAT, shape=[?,128], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
The error occurs at the following function:
def partial_fit(self, batch_x, k, j):
    print(batch_x.dtype, batch_x.shape)
    summary, _ = self.sess.run([self.merged, self.update_weights + self.update_deltas],
                               feed_dict={self.x: batch_x})
    self.train_writer.add_summary(summary, k*self.batch_size+j)
I printed the type and shape of batch_x. The shape is the same throughout the whole training process. The type is float64 when training the first RBM and float32 when training the second RBM. That is where it stops and throws the error.
The DBN worked well when I didn't compute the summary and just used the following code:
self.sess.run(self.update_weights + self.update_deltas,feed_dict={self.x: batch_x})
It also worked well if I only train a single RBM (with or without the summary).
The batch_x used to train the second RBM consists of the hidden-layer probabilities from the first RBM.
Could somebody help me solve this problem? I'm not sure if the float64 is the problem.
I guess it's hard for anyone to solve the problem with only the two pieces of code I gave, lol. The full code is too long to post here.
I saved the output of the first RBM and used it as input to train another RBM, and that works well. Thus, I think the problem is not the type or shape of the fed batch_x, but the structure of the DBN, or the way I collected the summaries.
Hope my situation can help others with similar problems.
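One plausible culprit that matches this pattern (only a guess, since the full code is not shown): if each RBM builds its merged summary op with tf.summary.merge_all(), the second RBM's merged op also pulls in the first RBM's summaries, which depend on the first RBM's placeholder, and that would produce exactly the "must feed a value for input/x" error. Merging only each RBM's own summaries avoids that; a sketch with hypothetical names:
import tensorflow as tf  # TF 1.x

class RBM(object):
    def __init__(self, n_visible, name):
        with tf.name_scope(name):
            self.x = tf.placeholder(tf.float32, [None, n_visible], name='x')
            # Stand-in for the real reconstruction error of this RBM.
            recon_error = tf.reduce_mean(self.x)
            # Merge only this RBM's summaries; tf.summary.merge_all() would also
            # collect summaries (and placeholder dependencies) from every other
            # RBM built in the same graph.
            err_summary = tf.summary.scalar('reconstruction_error', recon_error)
            self.merged = tf.summary.merge([err_summary])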

Uninitialized value error with tf.metrics.accuracy after both local and global initializers are called

I am trying to put together a metric for coarse segmentation in TensorFlow for use with Keras:
https://gist.github.com/DSLituiev/1adcc94b7e4e2b1861d39bbbc2db7307
I read this issue, which suggests one must use the local variable initializer. I did that, but I am still getting the exception:
Caused by op 'metrics/accuracy_per_channel/accuracy/AssignAdd', defined at:
File "segm_test.py", line 96, in <module>
metrics=[accuracy_per_channel],
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/keras/models.py", line 784, in compile
**kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/keras/engine/training.py", line 924, in compile
handle_metrics(output_metrics)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/keras/engine/training.py", line 921, in handle_metrics
mask=masks[i])
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/keras/engine/training.py", line 450, in weighted
score_array = fn(y_true, y_pred)
File "segm_test.py", line 89, in accuracy_per_channel
return metric_per_channel_tf(x,y, nch=2, metric=tf.metrics.accuracy)
File "segm_test.py", line 34, in metric_per_channel_tf
_, prec_ = metric(label_channel, pred_channel)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/metrics_impl.py", line 411, in accuracy
updates_collections, name or 'accuracy')
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/metrics_impl.py", line 344, in mean
update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/state_ops.py", line 239, in assign_add
ref, value, use_locking=use_locking, name=name)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/gen_state_ops.py", line 71, in assign_add
use_locking=use_locking, name=name)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2506, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1269, in __init__
self._traceback = _extract_stack()
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value metrics/accuracy_per_channel/accuracy/total
[[Node: metrics/accuracy_per_channel/accuracy/AssignAdd = AssignAdd[T=DT_FLOAT, _class=["loc:#metrics/accuracy_per_channel/accuracy/total"], use_locking=false, _device="/job:localhost/replica:0/task:0/gpu:0"](metrics/accuracy_per_channel/accuracy/total, metrics/accuracy_per_channel/accuracy/Sum)]]
[[Node: metrics/accuracy_per_channel/Mean/_43 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_858_metrics/accuracy_per_channel/Mean", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
I tried to use tf.get_variable('metrics/accuracy_per_channel/accuracy/total') in an interactive debugger, but I am getting:
ValueError: Shape of a new variable (metrics/accuracy_per_channel/accuracy/total) must be fully defined, but instead was <unknown>.
To my understanding, this implies that the variable does not exist at all?
Any tips on how to proceed?
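For reference, the pattern that is usually needed when mixing tf.metrics with Keras (a sketch, assuming the TF 1.x backend; the tiny model and random data are only stand-ins): the local variables that tf.metrics.accuracy creates exist only after compile() has built the metric ops, and they have to be initialized in the session Keras actually runs:
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

def tf_accuracy(y_true, y_pred):
    # tf.metrics.accuracy returns (value, update_op); returning the update op
    # lets Keras drive the streaming update on every batch.
    value, update_op = tf.metrics.accuracy(y_true, K.round(y_pred))
    return update_op

model = Sequential([Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[tf_accuracy])

# Initialize the metric's local variables (.../total and .../count) in the same
# session Keras uses, after compile() has created them.
K.get_session().run(tf.local_variables_initializer())

x = np.random.rand(32, 4).astype('float32')
y = (np.random.rand(32, 1) > 0.5).astype('float32')
model.fit(x, y, epochs=1, verbose=0)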