Tensorflow: restore checkpoint variables into distributed setup

I have a saved checkpoint generated by graph code in a regular, non-distributed setup under the constraint with tf.device('/cpu:0'): (to force the model parameters to reside on the CPU instead of the GPU).
I have now converted the same code/graph to a distributed setting, following the guidelines in TF-Inception.
When I try to restore the checkpoint in the distributed setup, I get device mismatch errors. Is there a way to override the device requirements saved in the checkpoint file?
My new distributed code has the Saver and scopes defined as:
if FLAGS.job_name == 'worker':
    with tf.device(tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d" % FLAGS.task_id,
            cluster=cluster_spec)):
        # ...same network-graph code... #
        restorer = tf.train.Saver()
    with tf.Session() as sess:
        restorer.restore(sess, 'ResNet-L50.ckpt')
My cluster has one ps and one worker, and both are on localhost. Error line:
tensorflow.python.framework.errors.InvalidArgumentError: Cannot assign a device to node 'save/restore_slice_268/shape_and_slice': Could not satisfy explicit device specification '/job:ps/task:0/device:CPU:0' because no devices matching that specification are registered in this process; available devices: /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/gpu:0
[[Node: save/restore_slice_268/shape_and_slice = Const[dtype=DT_STRING, value=Tensor<type: string shape: [] values: >, _device="/job:ps/task:0/device:CPU:0"]()]]
Full error trace:
I tensorflow/core/common_runtime/gpu/gpu_device.cc:756] Creating TensorFlow device (/gpu:0) -> (device: 0, name: Quadro K2200, pci bus id: 0000:01:00.0)
Traceback (most recent call last):
File "dlaunch.py", line 85, in <module>
tf.app.run() # (tf.app.flags parsed here)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 30, in run
sys.exit(main(sys.argv))
File "dlaunch.py", line 81, in main
dtrainer.train(server.target, cluster_spec)
File "/home/muneeb/parkingtf/dtrainer.py", line 88, in train
restorer.restore(sess, 'ResNet-L50.ckpt')
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 1103, in restore
{self.saver_def.filename_tensor_name: save_path})
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 328, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 563, in _run
feed_dict_string, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 636, in _do_run
target_list, options, run_metadata)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 658, in _do_call
e.code)
tensorflow.python.framework.errors.InvalidArgumentError: Cannot assign a device to node 'save/restore_slice_268/shape_and_slice': Could not satisfy explicit device specification '/job:ps/task:0/device:CPU:0' because no devices matching that specification are registered in this process; available devices: /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/gpu:0
[[Node: save/restore_slice_268/shape_and_slice = Const[dtype=DT_STRING, value=Tensor<type: string shape: [] values: >, _device="/job:ps/task:0/device:CPU:0"]()]]
Caused by op u'save/restore_slice_268/shape_and_slice', defined at:
File "dlaunch.py", line 85, in <module>
tf.app.run() # (tf.app.flags parsed here)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 30, in run
sys.exit(main(sys.argv))
File "dlaunch.py", line 81, in main
dtrainer.train(server.target, cluster_spec)
File "/home/muneeb/parkingtf/dtrainer.py", line 86, in train
restorer = tf.train.Saver()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 845, in __init__
restore_sequentially=restore_sequentially)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 515, in build
filename_tensor, vars_to_save, restore_sequentially, reshape)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 271, in _AddRestoreOps
values = self.restore_op(filename_tensor, vs, preferred_shard)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 186, in restore_op
preferred_shard=preferred_shard)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/io_ops.py", line 201, in _restore_slice
preferred_shard, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_io_ops.py", line 271, in _restore_slice
preferred_shard=preferred_shard, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 444, in apply_op
as_ref=input_arg.is_ref)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 566, in convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/constant_op.py", line 179, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/constant_op.py", line 166, in constant
attrs={"value": tensor_value, "dtype": dtype_value}, name=name).outputs[0]
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2162, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1161, in __init__
self._traceback = _extract_stack()

The following line:
with tf.Session() as sess:
...is responsible for the error. Passing no arguments to tf.Session() creates an in-process session that can only use devices on the local machine. To work in distributed mode, you should have something like:
# Assuming you created `server = tf.train.Server(...)` earlier.
with tf.Session(server.target) as sess:
...or, if you are connecting to a different process:
# Assuming your server is in a different process.
with tf.Session("grpc://..."):
Note that the devices are not stored in the checkpoint file, but they are being added by the tf.train.replica_device_setter(). Device configuration is a bit tricky right now, and it's something that we're working to simplify.
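To make that concrete, here is a minimal sketch of how the pieces could fit together for the one-ps/one-worker case described above. The host/port values are made up for illustration; FLAGS and the restore call mirror the question's code:
import tensorflow as tf

# Hypothetical two-task cluster on localhost; substitute your real hosts/ports.
cluster_spec = tf.train.ClusterSpec({
    "ps": ["localhost:2222"],
    "worker": ["localhost:2223"],
})
# FLAGS.job_name / FLAGS.task_id as in the question's snippet.
server = tf.train.Server(cluster_spec,
                         job_name=FLAGS.job_name,
                         task_index=FLAGS.task_id)

if FLAGS.job_name == 'ps':
    server.join()
elif FLAGS.job_name == 'worker':
    with tf.device(tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d" % FLAGS.task_id,
            cluster=cluster_spec)):
        # ...same network-graph code... #
        restorer = tf.train.Saver()

    # Connect to the in-process server rather than creating a local-only
    # session, so nodes placed on /job:ps and /job:worker can be resolved.
    with tf.Session(server.target) as sess:
        restorer.restore(sess, 'ResNet-L50.ckpt')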

Related

Tensorflow error calling model.fit after load_model

I'm training a small, simple neural net for a basic problem of regulating a motor's speed. I want to be able to save the model and exit the program, then load it later and resume training.
Here's the relevant code:
self.model = Sequential()
self.model.add(InputLayer(2))
self.model.add(Dense(6, activation='relu'))
self.model.add(Dense(9, activation='linear'))
self.model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# ... Loop for training and Evaluation (Deep Q Learner) ...
learn(self.model)
self.model.save('motor_model', save_format='tf')
Now after it's trained I want to be able to load the model and continue training
self.model = models.load_model('motor_model', compile=False)
# ... Loop for training and Evaluation (Deep Q Learner) ...
learn(self.model)
The first time I run the model it works fine. However, after saving and loading the model it does not. Upon loading the model I am able to call the predict function:
prediction = self.model.predict(currentInput)
However, it fails when I call the fit function:
self.model.fit(self.input, target_vec.reshape(-1, 9), epochs=1, verbose=0)
The error I get is:
2019-12-07 07:22:00.762174: W tensorflow/c/c_api.cc:326] Operation '{name:'sequential/dense/StatefulPartitionedCall' id:33 op device:{} def:{{{node sequential/dense/StatefulPartitionedCall}} = StatefulPartitionedCall[Tin=[DT_FLOAT, DT_RESOURCE, DT_RESOURCE], Tout=[DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT], _gradient_op_type="PartitionedCall-298", config="", config_proto="\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0002\002J\0008\001", executor_type="", f=__forward_restored_function_body_509[]](input_1, dense/kernel, dense/bias)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
2019-12-07 07:22:03.320478: W tensorflow/python/util/util.cc:299] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1363, in _do_call
    return fn(*args)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1346, in _run_fn
    self._extend_graph()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1386, in _extend_graph
    tf_session.ExtendSession(self._session)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "ct2.py", line 47, in <module>
    leftController.to_position(target, overrideAction)
  File "/opt/mowzr/motor_controller.py", line 94, in to_position
    self.model.fit(self.prevInput, target_vec.reshape(-1, 9), epochs=1, verbose=0)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 766, in fit
    use_multiprocessing=use_multiprocessing)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_arrays.py", line 680, in fit
    steps_name='steps_per_epoch')
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_arrays.py", line 275, in model_iteration
    model.reset_metrics()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 953, in reset_metrics
    m.reset_states()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/metrics.py", line 209, in reset_states
    K.batch_set_value([(v, 0) for v in self.variables])
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 3343, in batch_set_value
    get_session().run(assign_ops, feed_dict=feed_dict)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 490, in get_session
    _initialize_variables(session)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 905, in _initialize_variables
    [variables_module.is_variable_initialized(v) for v in candidate_vars])
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 956, in run
    run_metadata_ptr)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1179, in _run
    feed_dict_tensor, options, run_metadata)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1357, in _do_run
    run_metadata)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1382, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1363, in _do_call
    return fn(*args)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1346, in _run_fn
    self._extend_graph()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1386, in _extend_graph
    tf_session.ExtendSession(self._session)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "ct2.py", line 53, in <module>
    leftController.saveModel()
  File "/opt/mowzr/motor_controller.py", line 116, in saveModel
    self.model.save('motor_model', save_format='tf')
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/network.py", line 986, in save
    signatures, options)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/saving/save.py", line 115, in save_model
    signatures, options)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/saving/saved_model/save.py", line 74, in save
    save_lib.save(model, filepath, signatures, options)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/saved_model/save.py", line 924, in save
    object_saver.save(utils_impl.get_variables_path(export_dir))
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/tracking/util.py", line 1161, in save
    session = get_session()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/tracking/util.py", line 71, in get_session
    session = keras_backend.get_session()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 490, in get_session
    _initialize_variables(session)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/backend.py", line 905, in _initialize_variables
    [variables_module.is_variable_initialized(v) for v in candidate_vars])
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 956, in run
    run_metadata_ptr)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1179, in _run
    feed_dict_tensor, options, run_metadata)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1357, in _do_run
    run_metadata)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1382, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Node 'training/Adam/gradients/gradients/sequential/dense_1/StatefulPartitionedCall_grad/PartitionedCall': Connecting to invalid output 1 of source node sequential/dense_1/StatefulPartitionedCall which has 1 outputs.
I got the same error.
I don't know exactly what produces it, but there is a way to work around it (not a pretty one, though): create the model with the same architecture and set its weights to the loaded model's weights:
self.model = self.create_model()
self.model.set_weights(load_model("sample.model").get_weights())
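For completeness, a minimal sketch of that workaround adapted to the model above, assuming create_model() rebuilds the same Sequential architecture and that the SavedModel was written to 'motor_model' as in the question:
from tensorflow.keras.models import load_model

# Rebuild the architecture from scratch, copy the trained weights over from
# the SavedModel on disk, then recompile before calling fit() again.
self.model = self.create_model()
self.model.set_weights(load_model('motor_model', compile=False).get_weights())
self.model.compile(loss='mse', optimizer='adam', metrics=['mae'])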

Google cloud TPU: NotImplementedError: Non-resource Variables are not supported inside TPU computations

I am trying to train my model using google cloud's TPUs. The model works fine on CPU and GPU, and I can run the TPU tutorials without any problems (so it is not a problem of connecting to TPUs). However, when I run my program on the TPU cloud I get an error. The most important line is probably the following:
NotImplementedError: Non-resource Variables are not supported inside TPU computations (operator name: training_op/update_2nd_caps/primary_to_first_fc/W/ApplyAdam/RefEnter)
And here is the full error in case there is something important there:
Traceback (most recent call last):
File "TPU_playground.py", line 85, in <module>
capser.train(input_fn=train_input_fn_tpu, steps=n_steps)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 366, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 1119, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 1132, in _train_model_default
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py", line 1992, in _call_model_fn
features, labels, mode, config)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 1107, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py", line 2223, in _model_fn
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py", line 2537, in _train_on_tpu_system
device_assignment=ctx.device_assignment)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu.py", line 733, in shard
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu.py", line 394, in replicate
device_assignment, name)[1]
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu.py", line 546, in split_compile_and_replicate
outputs = computation(*computation_inputs)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py", line 2530, in multi_tpu_train_steps_on_single_shard
single_tpu_train_step, [_INITIAL_LOSS])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/training_loop.py", line 207, in repeat
cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/training_loop.py", line 169, in while_loop
name="")
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 3209, in while_loop
result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2941, in BuildLoop
pred, body, original_loop_vars, loop_vars, shape_invariants)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2878, in _BuildLoop
body_result = body(*packed_vars_for_body)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/training_loop.py", line 120, in body_wrapper
outputs = body(*(inputs + dequeue_ops))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/training_loop.py", line 203, in body_wrapper
return [i + 1] + _convert_to_list(body(*args))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py", line 1166, in train_step
self._call_model_fn(features, labels))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py", line 1337, in _call_model_fn
estimator_spec = self._model_fn(features=features, **kwargs)
File "/home/adrien_doerig/capser/capser_7_model_fn.py", line 100, in model_fn_tpu
**output_decoder_deconv_params)
File "/home/adrien_doerig/capser/capser_model.py", line 341, in capser_model
loss_training_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step(), name="training_op")
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 409, in minimize
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py", line 114, in apply_gradients
return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 602, in apply_gradients
update_ops.append(processor.update_op(self, grad))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 113, in update_op
update_op = optimizer._apply_dense(g, self._v) # pylint: disable=protected-access
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/adam.py", line 148, in _apply_dense
grad, use_locking=self._use_locking).op
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/gen_training_ops.py", line 293, in apply_adam
use_nesterov=use_nesterov, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 3414, in create_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1782, in __init__
self._control_flow_post_processing()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1793, in _control_flow_post_processing
self._control_flow_context.AddOp(self)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2430, in AddOp
self._AddOpInternal(op)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2451, in _AddOpInternal
real_x = self.AddValue(x)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2398, in AddValue
self._outer_context.AddInnerOp(enter.op)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu.py", line 310, in AddInnerOp
self._AddOpInternal(op)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tpu/python/tpu/tpu.py", line 287, in _AddOpInternal
"(operator name: %s)" % op.name)
NotImplementedError: Non-resource Variables are not supported inside TPU computations (operator name: training_op/update_2nd_caps/primary_to_first_fc/W/ApplyAdam/RefEnter)
It seems that the forward pass of the graph is built fine, but the backprop using AdamOptimizer is not supported by the TPUs in this case. I tried using more standard optimizers (GradientDescentOptimizer and MomentumOptimizer) but it doesn't help. All the tensors in the feedforward pass are in formats compatible with TPUs (i.e. tf.float32).
Does anyone have suggestions as to what I should try?
Thank you!
I have found a way to use the TPUs without using the ctpu up command, which solves the problem. I simply do everything exactly as I would do it to run my code on cloud GPUs:
-- see documentation here: https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction
-- a simple explanatory video here: https://www.youtube.com/watch?v=J_d4bEKUG2Q
BUT, the ONLY DIFFERENCE is that I use --scale-tier 'BASIC_TPU' instead of --scale-tier 'STANDARD_1' when I run my job. So the command to run the job is
gcloud ml-engine jobs submit training $JOB_NAME --module-name capser.capser_7_multi_gpu --package-path ./capser --job-dir=gs://capser-data/$JOB_NAME --scale-tier 'BASIC_TPU' --stream-logs --runtime-version 1.9 --region us-central1
(I previously defined the variable $JOB_NAME: export JOB_NAME=<input your job name>)
Also, make sure you choose a region which has TPUs! us-central1 works for example.
So maybe it is a small bug when using ctpu up, but it seems not to be a problem when using the above method. I hope that helps!

no kernel image is available for execution on the device

I am training Mask R-CNN. With tf-1.2 it trains fine, but with tf-1.5 it does not train.
The error is as follows:
Caused by op u'pyramid_1/AssignGTBoxes/Where_6', defined at:
File "/home/zhouzd2/letrain/applications/letrain.py", line 349, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 124, in run
_sys.exit(main(argv))
File "/home/zhouzd2/letrain/applications/letrain.py", line 346, in main
LeTrain().model_train(user_mode)
File "/home/zhouzd2/letrain/platform/base_train.py", line 1228, in model_train
cluster=self.cluster_spec)
File "/home/zhouzd2/letrain/platform/deployment/model_deploy.py", line 226, in create_clones
outputs, feed_ops,verify_model_loss = model_fn(*args, **kwargs)
File "/home/zhouzd2/letrain/platform/base_train.py", line 1195, in clone_fn
model_loss, end_points, feed_ops = network_fn(data_direct, data_batch, int_network_fn)
File "/home/zhouzd2/letrain/applications/letrain.py", line 214, in get_loss
FLAGS.batch_size)
File "/home/zhouzd2/letrain/applications/fmrcnn/get_fmrcnn_loss.py", line 23, in model_fn
loss_weights=[0.2, 0.2, 1.0, 0.2, 1.0])
File "/home/zhouzd2/letrain/applications/fmrcnn/libs/nets/pyramid_network.py", line 580, in build
is_training=is_training, gt_boxes=gt_boxes)
File "/home/zhouzd2/letrain/applications/fmrcnn/libs/nets/pyramid_network.py", line 263, in build_heads
assign_boxes(rois, [rois, batch_inds], [2, 3, 4, 5])
File "/home/zhouzd2/letrain/applications/fmrcnn/libs/layers/wrapper.py", line 173, in assign_boxes
inds = tf.where(tf.equal(assigned_layers, l))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 2538, in where
return gen_array_ops.where(condition=condition, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 6087, in where
"Where", input=condition, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 3160, in create_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1625, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InternalError (see above for traceback): WhereOp: Could not launch cub::DeviceReduce::Sum to count number of true / nonzero indices. temp_storage_bytes: 1, status: no kernel image is available for execution on the device
[[Node: pyramid_1/AssignGTBoxes/Where_6 = Where[T=DT_BOOL, _device="/job:worker/replica:0/task:0/device:GPU:0"](pyramid_1/AssignGTBoxes/Equal_6_S9493)]]
[[Node: pyramid_1/AssignGTBoxes/Reshape_8_G1028 = _Recv[client_terminated=false, recv_device="/job:worker/replica:0/task:0/device:CPU:0", send_device="/job:worker/replica:0/task:0/device:GPU:0", send_device_incarnation=5407481677180697062, tensor_name="edge_1349_pyramid_1/AssignGTBoxes/Reshape_8", tensor_type=DT_INT64, _device="/job:worker/replica:0/task:0/device:CPU:0"]()]]
There is no problem when loading the computation graph; the error is reported in sess.run().
Does anyone know how to solve this problem? Or does anyone know what function can replace tf.where?
Thank you!
If you are using Visual Studio:
Right click on the project > Properties > CUDA C/C++ > Device
and add the following to the Code Generation field:
compute_30,sm_30;compute_35,sm_35;compute_37,sm_37;compute_50,sm_50;compute_52,sm_52;compute_60,sm_60;compute_61,sm_61;compute_70,sm_70;compute_75,sm_75;
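As an aside (not part of the answer above), you can check which compute capability your GPU actually reports from TensorFlow itself, which helps confirm whether the installed binary or your build flags include a matching kernel image:
from tensorflow.python.client import device_lib

# List local devices; for GPUs, physical_device_desc ends with something like
# "compute capability: 6.1", which must be covered by the build's gencode flags.
for device in device_lib.list_local_devices():
    if device.device_type == 'GPU':
        print(device.physical_device_desc)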

How can I process String Tensor on GPU in Tensorflow?

I want to preprocess some data as input for Tensorflow, but the file is huge and I think Tensorflow might be able to accelerate the preprocessing on the GPU. I found some string operations in the library; however, when I run a simple test, it seems Tensorflow does not support those operations on the GPU. Does anyone have an idea how to process string data on the GPU in Tensorflow, or some other approach? For now, I suspect I may have to do it at the CUDA level.
Here is the test code:
import tensorflow as tf

sess = tf.InteractiveSession()
with tf.device('/gpu:0'):
    text = tf.constant("aa a", name="LEFT")
    result = tf.string_split([text], delimiter=" ")
print result.eval()
and error information:
I tensorflow/core/common_runtime/gpu/gpu_device.cc:975] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GTX 1070, pci bus id: 0000:01:00.0)
Traceback (most recent call last):
File "/media/freshield/BUFFER/LEARN_TENSORFLOW/11_tensorboard_graph/testing.py", line 51, in <module>
print result.eval()
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/sparse_tensor.py", line 225, in eval
session)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3797, in _eval_using_default_session
return session.run(tensors, feed_dict)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 965, in _run
feed_dict_string, options, run_metadata)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1015, in _do_run
target_list, options, run_metadata)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1035, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Cannot assign a device to node 'packed': Could not satisfy explicit device specification '/device:GPU:0' because no supported kernel for GPU devices is available.
[[Node: packed = Pack[N=1, T=DT_STRING, axis=0, _device="/device:GPU:0"](LEFT)]]
Caused by op u'packed', defined at:
File "/media/freshield/BUFFER/LEARN_TENSORFLOW/11_tensorboard_graph/testing.py", line 50, in <module>
result = tf.string_split([text], delimiter=" ")
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/string_ops.py", line 104, in string_split
source = ops.convert_to_tensor(source, dtype=dtypes.string)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 651, in convert_to_tensor
as_ref=False)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 716, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 923, in _autopacking_conversion_function
return _autopacking_helper(v, inferred_dtype, name or "packed")
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/array_ops.py", line 886, in _autopacking_helper
return gen_array_ops._pack(elems_as_tensors, name=scope)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 2041, in _pack
result = _op_def_lib.apply_op("Pack", values=values, axis=axis, name=name)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
op_def=op_def)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2395, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/freshield/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1264, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): Cannot assign a device to node 'packed': Could not satisfy explicit device specification '/device:GPU:0' because no supported kernel for GPU devices is available.
[[Node: packed = Pack[N=1, T=DT_STRING, axis=0, _device="/device:GPU:0"](LEFT)]]

tensorflow transfer learning problems to run on GPU

I am trying to run the retrain.py script (available here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py). I have noticed that the part starting at line 747 is executed on the CPU when the default should be the GPU. So I have added the following to force it to run on the GPU:
with tf.device("/gpu:0"):
    (train_step, cross_entropy, bottleneck_input, ground_truth_input,
     final_tensor) = add_final_training_ops(len(image_lists.keys()),
                                            FLAGS.final_tensor_name,
                                            bottleneck_tensor)
It causes the following error:
tensorflow.python.framework.errors.InvalidArgumentError: Cannot assign a device to node 'gradients/Mean_grad/Prod': Could not satisfy explicit device specification '/device:GPU:0' because no supported kernel for GPU devices is available
[[Node: gradients/Mean_grad/Prod = Prod[T=DT_INT32, keep_dims=false, _device="/device:GPU:0"](gradients/Mean_grad/Shape_2, gradients/Mean_grad/range_1)]]
Caused by op u'gradients/Mean_grad/Prod', defined at:
File "retrain_tensorboard_pickle_mean.py", line 921, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 30, in run
sys.exit(main(sys.argv))
File "retrain_tensorboard_pickle_mean.py", line 839, in main
(train_step, cross_entropy, bottleneck_input, ground_truth_input, label_ground_truth_input, final_tensor) = add_final_training_ops(len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor)
File "retrain_tensorboard_pickle_mean.py", line 686, in add_final_training_ops
cross_entropy_mean)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 190, in minimize
colocate_gradients_with_ops=colocate_gradients_with_ops)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 241, in compute_gradients
colocate_gradients_with_ops=colocate_gradients_with_ops)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gradients.py", line 481, in gradients
in_grads = _AsList(grad_fn(op, *out_grads))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_grad.py", line 91, in _MeanGrad
factor = (math_ops.reduce_prod(input_shape) //
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 810, in reduce_prod
keep_dims, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_math_ops.py", line 1115, in _prod
keep_dims=keep_dims, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 655, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2146, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1154, in __init__
self._traceback = _extract_stack()
...which was originally created as op u'Mean', defined at:
File "retrain_tensorboard_pickle_mean.py", line 921, in <module>
tf.app.run()
[elided 1 identical lines from previous traceback]
File "retrain_tensorboard_pickle_mean.py", line 839, in main
(train_step, cross_entropy, bottleneck_input, ground_truth_input, label_ground_truth_input, final_tensor) = add_final_training_ops(len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor)
File "retrain_tensorboard_pickle_mean.py", line 681, in add_final_training_ops
cross_entropy_mean = tf.reduce_mean(cross_entropy)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 783, in reduce_mean
keep_dims, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_math_ops.py", line 973, in _mean
keep_dims=keep_dims, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 655, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2146, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1154, in __init__
self._traceback = _extract_stack()
I have found here that it might be a problem of mean not being implemented on the GPU, but on the other hand there is a commit on GitHub which fixes computing the mean on the GPU.
The previous part, e.g. generating the bottlenecks (line 744), runs perfectly on the GPU without even forcing it.
I would be grateful for any help!!
Justyna
This has now been fixed in b874e2c, nice catch.
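If you are on a build that predates that commit, one common workaround (my suggestion, not part of the answer above) is to allow soft placement so that ops without a GPU kernel, such as the int32 Prod in the Mean gradient, fall back to the CPU instead of failing the explicit /gpu:0 constraint:
import tensorflow as tf

# allow_soft_placement lets TensorFlow fall back to the CPU when an op
# (such as the int32 Prod in the Mean gradient above) has no GPU kernel,
# instead of failing the explicit /gpu:0 device constraint.
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
sess = tf.Session(config=config)
# ...run train_step from add_final_training_ops with this session as before...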