TensorFlow beginner: using an Estimator for prediction after running an Experiment - tensorflow

I am following this guide by Google (https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/tensorflow/d_experiment.ipynb) to build a simple linear regression model.
The notebook uses the Experiment class and learn_runner (a class for which I cannot find any documentation) to train the model. I am now trying to use the trained model for prediction. I tried the following, but I got an error. Would you please let me know the correct way to do it? Thanks.
Code added to the bottom:
# load the saved model
estimator = tflearn.LinearRegressor(feature_columns=feature_cols, model_dir='taxi_trained')
estimator.predict(input_fn=get_test)
The error I got:
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_is_chief': True, '_model_dir': None, '_save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x00000218611630F0>, '_master': '', '_task_id': 0, '_keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', '_environment': 'local', '_num_worker_replicas': 0, '_tf_random_seed': None, '_tf_config': gpu_options {
per_process_gpu_memory_fraction: 1
}
, '_save_checkpoints_steps': None, '_keep_checkpoint_max': 5, '_task_type': None, '_num_ps_replicas': 0, '_save_summary_steps': 100}
WARNING:tensorflow:From c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py:335: calling LinearRegressor.predict (from tensorflow.contrib.learn.python.learn.estimators.linear) with outputs=None is deprecated and will be removed after 2017-03-01.
Instructions for updating:
Please switch to predict_scores, or set `outputs` argument.
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-5-7f1903437174> in <module>()
1 with tf.Session() as sess:
2 estimator = tflearn.LinearRegressor(feature_columns=feature_cols, model_dir='taxi_trained')
----> 3 estimator.predict(input_fn=get_test)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
333 _call_location(), decorator_utils.get_qualified_name(func),
334 func.__module__, arg_name, arg_value, date, instructions)
--> 335 return func(*args, **kwargs)
336 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
337 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
333 _call_location(), decorator_utils.get_qualified_name(func),
334 func.__module__, arg_name, arg_value, date, instructions)
--> 335 return func(*args, **kwargs)
336 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
337 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\linear.py in predict(self, x, input_fn, batch_size, outputs, as_iterable)
755 input_fn=input_fn,
756 batch_size=batch_size,
--> 757 as_iterable=as_iterable)
758 return super(LinearRegressor, self).predict(
759 x=x,
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
333 _call_location(), decorator_utils.get_qualified_name(func),
334 func.__module__, arg_name, arg_value, date, instructions)
--> 335 return func(*args, **kwargs)
336 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
337 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\linear.py in predict_scores(self, x, input_fn, batch_size, as_iterable)
790 batch_size=batch_size,
791 outputs=[key],
--> 792 as_iterable=as_iterable)
793 if as_iterable:
794 return _as_iterable(preds, output=key)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
279 _call_location(), decorator_utils.get_qualified_name(func),
280 func.__module__, arg_name, date, instructions)
--> 281 return func(*args, **kwargs)
282 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
283 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in predict(self, x, input_fn, batch_size, outputs, as_iterable)
563 feed_fn=feed_fn,
564 outputs=outputs,
--> 565 as_iterable=as_iterable)
566
567 def get_variable_value(self, name):
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in _infer_model(self, input_fn, feed_fn, outputs, as_iterable, iterate_batches)
855 contrib_framework.create_global_step(g)
856 features = self._get_features_from_input_fn(input_fn)
--> 857 infer_ops = self._get_predict_ops(features)
858 predictions = self._filter_predictions(infer_ops.predictions, outputs)
859 mon_sess = monitored_session.MonitoredSession(
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in _get_predict_ops(self, features)
1186 labels = tensor_signature.create_placeholders_from_signatures(
1187 self._labels_info)
-> 1188 return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
1189
1190 def export_savedmodel(
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in _call_model_fn(self, features, labels, mode)
1101 if 'model_dir' in model_fn_args:
1102 kwargs['model_dir'] = self.model_dir
-> 1103 model_fn_results = self._model_fn(features, labels, **kwargs)
1104
1105 if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\linear.py in _linear_model_fn(features, labels, mode, params, config)
159 num_outputs=head.logits_dimension,
160 weight_collections=[parent_scope],
--> 161 scope=scope)
162
163 def _train_op_fn(loss):
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\layers\python\layers\feature_column_ops.py in weighted_sum_from_feature_columns(columns_to_tensors, feature_columns, num_outputs, weight_collections, trainable, scope)
529 # pylint: disable=protected-access
530 for column in sorted(set(feature_columns), key=lambda x: x.key):
--> 531 transformed_tensor = transformer.transform(column)
532 try:
533 embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\layers\python\layers\feature_column_ops.py in transform(self, feature_column)
880 return self._columns_to_tensors[feature_column]
881
--> 882 feature_column.insert_transformed_feature(self._columns_to_tensors)
883
884 if feature_column not in self._columns_to_tensors:
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\layers\python\layers\feature_column.py in insert_transformed_feature(self, columns_to_tensors)
1406 """
1407 # Transform the input tensor according to the normalizer function.
-> 1408 input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
1409 columns_to_tensors[self] = math_ops.to_float(input_tensor)
1410
KeyError: 'dropofflat'
I am using TensorFlow 1.1 with Python 3.5 on Windows 10, with GPU enabled.
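For reference, a minimal sketch of what a prediction input_fn could look like (my own assumption, not taken from the notebook): it must return a dict of tensors keyed by the same feature-column names used during training, otherwise the column lookup fails with a KeyError like the 'dropofflat' one above. Here tflearn and feature_cols are the same names defined earlier in the notebook, and the feature values are placeholders:
def get_test():
    # Hypothetical feature values; the keys must match the trained feature columns.
    features = {
        'dropofflat': tf.constant([40.7]),
        # ... one entry per remaining feature column defined in the notebook
    }
    return features  # labels are not needed for prediction

estimator = tflearn.LinearRegressor(feature_columns=feature_cols, model_dir='taxi_trained')
predictions = list(estimator.predict(input_fn=get_test))  # predict() returns a generator by default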

Related

UnimplementedError while training on TPU in Colab

When trying to train my model on TPU in Colab
model.fit(train_dataset,
          steps_per_epoch=len(df_train) // config.BATCH_SIZE,
          validation_data=valid_dataset,
          epochs=config.EPOCHS)
I got this error with the whole traceback:
UnimplementedError Traceback (most recent call last)
<ipython-input-37-92afbe2b5ae5> in <module>()
2 steps_per_epoch = len(df_train) // config.BATCH_SIZE,
3 validation_data = valid_dataset,
----> 4 epochs = config.EPOCHS)
13 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1186 logs = tmp_logs # No error, now safe to assign to logs.
1187 end_step = step + data_handler.step_increment
-> 1188 callbacks.on_train_batch_end(end_step, logs)
1189 if self.stop_training:
1190 break
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in on_train_batch_end(self, batch, logs)
455 """
456 if self._should_call_train_batch_hooks:
--> 457 self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
458
459 def on_test_batch_begin(self, batch, logs=None):
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _call_batch_hook(self, mode, hook, batch, logs)
315 self._call_batch_begin_hook(mode, batch, logs)
316 elif hook == 'end':
--> 317 self._call_batch_end_hook(mode, batch, logs)
318 else:
319 raise ValueError('Unrecognized hook: {}'.format(hook))
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _call_batch_end_hook(self, mode, batch, logs)
335 self._batch_times.append(batch_time)
336
--> 337 self._call_batch_hook_helper(hook_name, batch, logs)
338
339 if len(self._batch_times) >= self._num_batches_for_timing_check:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _call_batch_hook_helper(self, hook_name, batch, logs)
373 for callback in self.callbacks:
374 hook = getattr(callback, hook_name)
--> 375 hook(batch, logs)
376
377 if self._check_timing:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in on_train_batch_end(self, batch, logs)
1027
1028 def on_train_batch_end(self, batch, logs=None):
-> 1029 self._batch_update_progbar(batch, logs)
1030
1031 def on_test_batch_end(self, batch, logs=None):
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _batch_update_progbar(self, batch, logs)
1099 if self.verbose == 1:
1100 # Only block async when verbose = 1.
-> 1101 logs = tf_utils.sync_to_numpy_or_python_type(logs)
1102 self.progbar.update(self.seen, list(logs.items()), finalize=False)
1103
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/tf_utils.py in sync_to_numpy_or_python_type(tensors)
517 return t # Don't turn ragged or sparse tensors to NumPy.
518
--> 519 return nest.map_structure(_to_single_numpy_or_python_type, tensors)
520
521
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
865
866 return pack_sequence_as(
--> 867 structure[0], [func(*x) for x in entries],
868 expand_composites=expand_composites)
869
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
865
866 return pack_sequence_as(
--> 867 structure[0], [func(*x) for x in entries],
868 expand_composites=expand_composites)
869
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/tf_utils.py in _to_single_numpy_or_python_type(t)
513 def _to_single_numpy_or_python_type(t):
514 if isinstance(t, ops.Tensor):
--> 515 x = t.numpy()
516 return x.item() if np.ndim(x) == 0 else x
517 return t # Don't turn ragged or sparse tensors to NumPy.
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in numpy(self)
1092 """
1093 # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
-> 1094 maybe_arr = self._numpy() # pylint: disable=protected-access
1095 return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
1096
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in _numpy(self)
1060 return self._numpy_internal()
1061 except core._NotOkStatusException as e: # pylint: disable=protected-access
-> 1062 six.raise_from(core._status_to_exception(e.code, e.message), None) # pylint: disable=protected-access
1063
1064 #property
/usr/local/lib/python3.7/dist-packages/six.py in raise_from(value, from_value)
UnimplementedError: 9 root error(s) found.
(0) Unimplemented: {{function_node __inference_train_function_88574}} Asked to propagate a dynamic dimension from hlo convolution.24975#{}#2 to hlo %all-reduce.24980 = f32[3,3,<=3,32]{3,2,1,0} all-reduce(f32[3,3,<=3,32]{3,2,1,0} %convolution.24975), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=%sum.24976, metadata={op_type="CrossReplicaSum" op_name="while/body/_1/while/Adam/CrossReplicaSum"}, which is not implemented.
[[{{node TPUReplicate/_compile/_18168620323984915962/_4}}]]
[[while/body/_1/while/strided_slice_1/_253]]
(1) Unimplemented: {{function_node __inference_train_function_88574}} Asked to propagate a dynamic dimension from hlo convolution.24975#{}#2 to hlo %all-reduce.24980 = f32[3,3,<=3,32]{3,2,1,0} all-reduce(f32[3,3,<=3,32]{3,2,1,0} %convolution.24975), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=%sum.24976, metadata={op_type="CrossReplicaSum" op_name="while/body/_1/while/Adam/CrossReplicaSum"}, which is not implemented.
[[{{node TPUReplicate/_compile/_18168620323984915962/_4}}]]
[[TPUReplicate/_compile/_18168620323984915962/_4/_243]]
(2) Unimplemented: {{function_node __inference_train_function_88574}} Asked to propagate a dynamic dimension from hlo convolution.24975#{}#2 to hlo %all-reduce.24980 = f32[3,3,<=3,32]{3,2,1,0} all-reduce(f32[3,3,<=3,32]{3,2,1,0} %convolution.24975), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=%sum.24976, metadata={op_type="CrossReplicaSum" op_name="while/body/_1/while/Adam/CrossReplicaSum"}, which is not implemented.[truncated]
Things that I have checked:
My data is in a GCS bucket and can be retrieved using the dataset object I created.
My model definition:
with strategy.scope():
    base_model = efn.EfficientNetB0(include_top=False)
    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(config.IMG_SIZE, config.IMG_SIZE, 3)),
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(5, activation='softmax')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=config.LR),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
                  steps_per_execution=32)
Any idea why this is happening? It says that a dynamic dimension was asked to propagate, but I don't think that should be the case, considering the model worked in a GPU setting (with the data present in the current session).
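One direction worth checking (my assumption, not verified against this setup): TPUs require fully static tensor shapes, and the <=3 in the error suggests the image channel dimension is dynamic. Pinning every example to a static shape in the input pipeline and batching with drop_remainder=True is the usual way to remove dynamic dimensions; a rough sketch with a hypothetical parse function:
def decode(image_bytes, label):                                       # hypothetical parse function
    image = tf.io.decode_jpeg(image_bytes, channels=3)                # pin the channel dimension
    image = tf.image.resize(image, [config.IMG_SIZE, config.IMG_SIZE])
    image = tf.reshape(image, [config.IMG_SIZE, config.IMG_SIZE, 3])  # make the shape fully static
    return image, label

train_dataset = train_dataset.batch(config.BATCH_SIZE, drop_remainder=True)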

TypeError: Expected any non-tensor type, got a tensor instead

I was following a post on 'Training a transformer model for a chatbot with TensorFlow 2.0'. I encountered an error on my local machine, although the code seems to work fine in Colab. Below is the code snippet.
def encoder_layer(units, d_model, num_heads, dropout, name="encoder_layer"):
    inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
    padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
    attention = MultiHeadAttention(
        d_model, num_heads, name="attention")({
            'query': inputs,
            'key': inputs,
            'value': inputs,
            'mask': padding_mask
        })
    attention = tf.keras.layers.Dropout(rate=dropout)(attention)
    attention = tf.keras.layers.LayerNormalization(
        epsilon=1e-6)(inputs + attention)
    outputs = tf.keras.layers.Dense(units=units, activation='relu')(attention)
    outputs = tf.keras.layers.Dense(units=d_model)(outputs)
    outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
    outputs = tf.keras.layers.LayerNormalization(
        epsilon=1e-6)(attention + outputs)
    return tf.keras.Model(
        inputs=[inputs, padding_mask], outputs=outputs, name=name)
I called the above function as follows:
sample_encoder_layer = encoder_layer(
    units=512,
    d_model=128,
    num_heads=4,
    dropout=0.3,
    name="sample_encoder_layer")
Below is the traceback of the error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py in _AssertCompatible(values, dtype)
323 try:
--> 324 fn(values)
325 except ValueError as e:
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py in _check_not_tensor(values)
275 def _check_not_tensor(values):
--> 276 _ = [_check_failed(v) for v in nest.flatten(values)
277 if isinstance(v, ops.Tensor)]
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py in <listcomp>(.0)
276 _ = [_check_failed(v) for v in nest.flatten(values)
--> 277 if isinstance(v, ops.Tensor)]
278 # pylint: enable=invalid-name
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py in _check_failed(v)
247 # it is safe to use here.
--> 248 raise ValueError(v)
249
ValueError: Tensor("attention_1/Identity:0", shape=(None, None, 128), dtype=float32)
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-20-3fa05a9bbfda> in <module>
----> 1 sample_encoder_layer = encoder_layer(units=512, d_model=128, num_heads=4, dropout=0.3, name='sample_encoder_layer')
2
3 tf.keras.utils.plot_model(
4 sample_encoder_layer, to_file='encoder_layer.png', show_shapes=True)
<ipython-input-18-357ca53de1c0> in encoder_layer(units, d_model, num_heads, dropout, name)
10 'mask': padding_mask
11 })
---> 12 attention = tf.keras.layers.Dropout(rate=dropout)(attention)
13 attention = tf.keras.layers.LayerNormalization(
14 epsilon=1e-6)(inputs + attention)
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
920 not base_layer_utils.is_in_eager_or_tf_function()):
921 with auto_control_deps.AutomaticControlDependencies() as acd:
--> 922 outputs = call_fn(cast_inputs, *args, **kwargs)
923 # Wrap Tensors in `outputs` in `tf.identity` to avoid
924 # circular dependencies.
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/keras/layers/core.py in call(self, inputs, training)
209 output = tf_utils.smart_cond(training,
210 dropped_inputs,
--> 211 lambda: array_ops.identity(inputs))
212 return output
213
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/keras/utils/tf_utils.py in smart_cond(pred, true_fn, false_fn, name)
63 pred, true_fn=true_fn, false_fn=false_fn, name=name)
64 return smart_module.smart_cond(
---> 65 pred, true_fn=true_fn, false_fn=false_fn, name=name)
66
67
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/smart_cond.py in smart_cond(pred, true_fn, false_fn, name)
57 else:
58 return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
---> 59 name=name)
60
61
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py in cond(pred, true_fn, false_fn, strict, name, fn1, fn2)
1175 if (util.EnableControlFlowV2(ops.get_default_graph()) and
1176 not context.executing_eagerly()):
-> 1177 return cond_v2.cond_v2(pred, true_fn, false_fn, name)
1178
1179 # We needed to make true_fn/false_fn keyword arguments for
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/ops/cond_v2.py in cond_v2(pred, true_fn, false_fn, name)
82 true_name, collections=ops.get_default_graph()._collections), # pylint: disable=protected-access
83 add_control_dependencies=add_control_dependencies,
---> 84 op_return_value=pred)
85 false_graph = func_graph_module.func_graph_from_py_func(
86 false_name,
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/keras/layers/core.py in dropped_inputs()
205 noise_shape=self._get_noise_shape(inputs),
206 seed=self.seed,
--> 207 rate=self.rate)
208
209 output = tf_utils.smart_cond(training,
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py in dropout(x, keep_prob, noise_shape, seed, name, rate)
4341 raise ValueError("You must provide a rate to dropout.")
4342
-> 4343 return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
4344
4345
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py in dropout_v2(x, rate, noise_shape, seed, name)
4422 raise ValueError("rate must be a scalar tensor or a float in the "
4423 "range [0, 1), got %g" % rate)
-> 4424 x = ops.convert_to_tensor(x, name="x")
4425 x_dtype = x.dtype
4426 if not x_dtype.is_floating:
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1339
1340 if ret is None:
-> 1341 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1342
1343 if ret is NotImplemented:
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
319 as_ref=False):
320 _ = as_ref
--> 321 return constant(v, dtype=dtype, name=name)
322
323
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
260 """
261 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 262 allow_broadcast=True)
263
264
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
298 tensor_util.make_tensor_proto(
299 value, dtype=dtype, shape=shape, verify_shape=verify_shape,
--> 300 allow_broadcast=allow_broadcast))
301 dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
302 const_tensor = g._create_op_internal( # pylint: disable=protected-access
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape, allow_broadcast)
449 nparray = np.empty(shape, dtype=np_dt)
450 else:
--> 451 _AssertCompatible(values, dtype)
452 nparray = np.array(values, dtype=np_dt)
453 # check to them.
~/anaconda3/envs/tf-chatbot/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py in _AssertCompatible(values, dtype)
326 [mismatch] = e.args
327 if dtype is None:
--> 328 raise TypeError("Expected any non-tensor type, got a tensor instead.")
329 else:
330 raise TypeError("Expected %s, got %s of type '%s' instead." %
TypeError: Expected any non-tensor type, got a tensor instead.
I had this error when I converted a function argument of int datatype to tf.constant, and I resolved the issue in my case by undoing it. I ran into this while converting TF1 code to TF 2.3.0. Looking at your error trace, I can see it points to the handling of some constant in tf-chatbot. Kindly check how that constant is handled.
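To illustrate the point above with a hedged example (the names are mine, not from the chatbot post): scalar hyperparameters such as the dropout rate should stay plain Python numbers rather than being wrapped in tf.constant:
dropout = 0.3                   # a plain float works with tf.keras.layers.Dropout(rate=dropout)
# dropout = tf.constant(0.3)    # wrapping the rate like this is the kind of conversion that triggered the error for me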
This issue is fixed in TensorFlow 2.3.0 onwards. Can you upgrade your TensorFlow version?
pip install tensorflow==2.3.0
pip install --upgrade tensorflow

"TypeError: unsupported callable" when saving keras model using tensorlow

When saving a Keras model defined like this:
# define model
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(n_steps_in, n_features)))
model.add(RepeatVector(n_steps_out))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(n_features)))
model.compile(optimizer='adam', loss='mse')
model.save(path)
I got the following message:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/topic_forecaster/lib/python3.7/inspect.py in getfullargspec(func)
1125 skip_bound_arg=False,
-> 1126 sigcls=Signature)
1127 except Exception as ex:
~/anaconda3/envs/topic_forecaster/lib/python3.7/inspect.py in _signature_from_callable(obj, follow_wrapper_chains, skip_bound_arg, sigcls)
2287 return _signature_from_builtin(sigcls, obj,
-> 2288 skip_bound_arg=skip_bound_arg)
2289
~/anaconda3/envs/topic_forecaster/lib/python3.7/inspect.py in _signature_from_builtin(cls, func, skip_bound_arg)
2111 if not s:
-> 2112 raise ValueError("no signature found for builtin {!r}".format(func))
2113
ValueError: no signature found for builtin <tensorflow.python.keras.saving.saved_model.save_impl.LayerCall object at 0x7f8c1f357190>
The above exception was the direct cause of the following exception:
TypeError Traceback (most recent call last)
<ipython-input-9-3fe80778ab16> in <module>
3 path = Path.cwd().parent / 'models' / 'tpi'
4 Path(path).mkdir(parents=True, exist_ok=True)
----> 5 model.save(str(path))
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in save(self, filepath, overwrite, include_optimizer, save_format, signatures, options)
973 """
974 saving.save_model(self, filepath, overwrite, include_optimizer, save_format,
--> 975 signatures, options)
976
977 def save_weights(self, filepath, overwrite=True, save_format=None):
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/save.py in save_model(model, filepath, overwrite, include_optimizer, save_format, signatures, options)
113 else:
114 saved_model_save.save(model, filepath, overwrite, include_optimizer,
--> 115 signatures, options)
116
117
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save.py in save(model, filepath, overwrite, include_optimizer, signatures, options)
72 # default learning phase placeholder.
73 with K.learning_phase_scope(0):
---> 74 save_lib.save(model, filepath, signatures, options)
75
76 if not include_optimizer:
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/saved_model/save.py in save(obj, export_dir, signatures, options)
868 if signatures is None:
869 signatures = signature_serialization.find_function_to_export(
--> 870 checkpoint_graph_view)
871
872 signatures = signature_serialization.canonicalize_signatures(signatures)
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/saved_model/signature_serialization.py in find_function_to_export(saveable_view)
62 # If the user did not specify signatures, check the root object for a function
63 # that can be made into a signature.
---> 64 functions = saveable_view.list_functions(saveable_view.root)
65 signature = functions.get(DEFAULT_SIGNATURE_ATTR, None)
66 if signature is not None:
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/saved_model/save.py in list_functions(self, obj)
139 if obj_functions is None:
140 obj_functions = obj._list_functions_for_serialization( # pylint: disable=protected-access
--> 141 self._serialization_cache)
142 self._functions[obj] = obj_functions
143 return obj_functions
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py in _list_functions_for_serialization(self, serialization_cache)
2420 def _list_functions_for_serialization(self, serialization_cache):
2421 return (self._trackable_saved_model_saver
-> 2422 .list_functions_for_serialization(serialization_cache))
2423
2424
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/base_serialization.py in list_functions_for_serialization(self, serialization_cache)
89 `ConcreteFunction`.
90 """
---> 91 fns = self.functions_to_serialize(serialization_cache)
92
93 # The parent AutoTrackable class saves all user-defined tf.functions, and
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/layer_serialization.py in functions_to_serialize(self, serialization_cache)
77 def functions_to_serialize(self, serialization_cache):
78 return (self._get_serialized_attributes(
---> 79 serialization_cache).functions_to_serialize)
80
81 def _get_serialized_attributes(self, serialization_cache):
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/layer_serialization.py in _get_serialized_attributes(self, serialization_cache)
92
93 object_dict, function_dict = self._get_serialized_attributes_internal(
---> 94 serialization_cache)
95
96 serialized_attr.set_and_validate_objects(object_dict)
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/model_serialization.py in _get_serialized_attributes_internal(self, serialization_cache)
51 objects, functions = (
52 super(ModelSavedModelSaver, self)._get_serialized_attributes_internal(
---> 53 serialization_cache))
54 functions['_default_save_signature'] = default_signature
55 return objects, functions
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/layer_serialization.py in _get_serialized_attributes_internal(self, serialization_cache)
101 """Returns dictionary of serialized attributes."""
102 objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)
--> 103 functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
104 # Attribute validator requires that the default save signature is added to
105 # function dict, even if the value is None.
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in wrap_layer_functions(layer, serialization_cache)
154 # Reset the losses of the layer and its children. The call function in each
155 # child layer is replaced with tf.functions.
--> 156 original_fns = _replace_child_layer_functions(layer, serialization_cache)
157 original_losses = _reset_layer_losses(layer)
158
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in _replace_child_layer_functions(layer, serialization_cache)
246 layer_fns = (
247 child_layer._trackable_saved_model_saver._get_serialized_attributes(
--> 248 serialization_cache).functions)
249 else:
250 layer_fns = (
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/layer_serialization.py in _get_serialized_attributes(self, serialization_cache)
92
93 object_dict, function_dict = self._get_serialized_attributes_internal(
---> 94 serialization_cache)
95
96 serialized_attr.set_and_validate_objects(object_dict)
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/layer_serialization.py in _get_serialized_attributes_internal(self, serialization_cache)
101 """Returns dictionary of serialized attributes."""
102 objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)
--> 103 functions = save_impl.wrap_layer_functions(self.obj, serialization_cache)
104 # Attribute validator requires that the default save signature is added to
105 # function dict, even if the value is None.
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in wrap_layer_functions(layer, serialization_cache)
164 call_fn_with_losses = call_collection.add_function(
165 _wrap_call_and_conditional_losses(layer),
--> 166 '{}_layer_call_and_return_conditional_losses'.format(layer.name))
167 call_fn = call_collection.add_function(
168 _extract_outputs_from_fn(layer, call_fn_with_losses),
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in add_function(self, call_fn, name)
492 # Manually add traces for layers that have keyword arguments and have
493 # a fully defined input signature.
--> 494 self.add_trace(*self._input_signature)
495 return fn
496
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in add_trace(self, *args, **kwargs)
411 fn.get_concrete_function(*args, **kwargs)
412
--> 413 trace_with_training(True)
414 trace_with_training(False)
415 else:
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in trace_with_training(value, fn)
409 utils.set_training_arg(value, self._training_arg_index, args, kwargs)
410 with K.learning_phase_scope(value):
--> 411 fn.get_concrete_function(*args, **kwargs)
412
413 trace_with_training(True)
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in get_concrete_function(self, *args, **kwargs)
536 if not self.call_collection.tracing:
537 self.call_collection.add_trace(*args, **kwargs)
--> 538 return super(LayerCall, self).get_concrete_function(*args, **kwargs)
539
540
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in get_concrete_function(self, *args, **kwargs)
774 if self._stateful_fn is None:
775 initializer_map = object_identity.ObjectIdentityDictionary()
--> 776 self._initialize(args, kwargs, add_initializers_to=initializer_map)
777 self._initialize_uninitialized_variables(initializer_map)
778
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
406 self._concrete_stateful_fn = (
407 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 408 *args, **kwds))
409
410 def invalid_creator_scope(*unused_args, **unused_kwds):
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1846 if self.input_signature:
1847 args, kwargs = None, None
-> 1848 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1849 return graph_function
1850
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2148 graph_function = self._function_cache.primary.get(cache_key, None)
2149 if graph_function is None:
-> 2150 graph_function = self._create_graph_function(args, kwargs)
2151 self._function_cache.primary[cache_key] = graph_function
2152 return graph_function, args, kwargs
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2039 arg_names=arg_names,
2040 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041 capture_by_value=self._capture_by_value),
2042 self._function_attributes,
2043 # Tell the ConcreteFunction to clean up its graph once it goes out of
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
913 converted_func)
914
--> 915 func_outputs = python_func(*func_args, **func_kwargs)
916
917 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
356 # __wrapped__ allows AutoGraph to swap in a converted function. We give
357 # the function a weak reference to itself to avoid a reference cycle.
--> 358 return weak_wrapped_fn().__wrapped__(*args, **kwds)
359 weak_wrapped_fn = weakref.ref(wrapped_fn)
360
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in wrapper(*args, **kwargs)
513 layer, inputs=inputs, build_graph=False, training=training,
514 saving=True):
--> 515 ret = method(*args, **kwargs)
516 _restore_layer_losses(original_losses)
517 return ret
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/utils.py in wrap_with_training_arg(*args, **kwargs)
109 training,
110 lambda: replace_training_and_call(True),
--> 111 lambda: replace_training_and_call(False))
112
113 # Create arg spec for decorated function. If 'training' is not defined in the
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/utils/tf_utils.py in smart_cond(pred, true_fn, false_fn, name)
57 pred, true_fn=true_fn, false_fn=false_fn, name=name)
58 return smart_module.smart_cond(
---> 59 pred, true_fn=true_fn, false_fn=false_fn, name=name)
60
61
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/framework/smart_cond.py in smart_cond(pred, true_fn, false_fn, name)
52 if pred_value is not None:
53 if pred_value:
---> 54 return true_fn()
55 else:
56 return false_fn()
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/utils.py in <lambda>()
108 return tf_utils.smart_cond(
109 training,
--> 110 lambda: replace_training_and_call(True),
111 lambda: replace_training_and_call(False))
112
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/utils.py in replace_training_and_call(training)
104 def replace_training_and_call(training):
105 set_training_arg(training, training_arg_index, args, kwargs)
--> 106 return wrapped_call(*args, **kwargs)
107
108 return tf_utils.smart_cond(
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/save_impl.py in call_and_return_conditional_losses(inputs, *args, **kwargs)
555 layer_call = _get_layer_call_method(layer)
556 def call_and_return_conditional_losses(inputs, *args, **kwargs):
--> 557 return layer_call(inputs, *args, **kwargs), layer.get_losses_for(inputs)
558 return _create_call_fn_decorator(layer, call_and_return_conditional_losses)
559
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/layers/wrappers.py in call(self, inputs, training, mask)
218 def call(self, inputs, training=None, mask=None):
219 kwargs = {}
--> 220 if generic_utils.has_arg(self.layer.call, 'training'):
221 kwargs['training'] = training
222
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/keras/utils/generic_utils.py in has_arg(fn, name, accept_all)
302 bool, whether `fn` accepts a `name` keyword argument.
303 """
--> 304 arg_spec = tf_inspect.getfullargspec(fn)
305 if accept_all and arg_spec.varkw is not None:
306 return True
~/anaconda3/envs/topic_forecaster/lib/python3.7/site-packages/tensorflow_core/python/util/tf_inspect.py in getfullargspec(obj)
255 if d.decorator_argspec is not None:
256 return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)
--> 257 return _getfullargspec(target)
258
259
~/anaconda3/envs/topic_forecaster/lib/python3.7/inspect.py in getfullargspec(func)
1130 # else. So to be fully backwards compatible, we catch all
1131 # possible exceptions here, and reraise a TypeError.
-> 1132 raise TypeError('unsupported callable') from ex
1133
1134 args = []
TypeError: unsupported callable
However, when saving a regular model without a TimeDistributed layer as below, it worked fine:
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
model.save('temp', save_format='tf')
By upgrading TensorFlow to 2.1 with CUDA 10 using Anaconda as below, the problem was solved.
conda create -n tf-gpu-cuda10 tensorflow-gpu=2.1 cudatoolkit=10
conda activate tf-gpu-cuda10
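If upgrading is not possible, a commonly suggested workaround (untested here, and not part of the original answer) is to save the model in the HDF5 format instead of the SavedModel format, which takes a different code path than the SavedModel serializer shown in the traceback:
model.save('model.h5', save_format='h5')   # hypothetical filename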

Using tf.data.Dataset with tf Hub Modules

How do I feed a tf.keras model that includes a 1D-input TF Hub module with a tf.data.Dataset?
(Ultimately, the aim is to use a single tf.data.Dataset with a multi-input, multi-output Keras functional API model.)
Tried this:
import tensorflow as tf
import tensorflow_hub as hub
embed = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embed, output_shape=[20], input_shape=[],
                           dtype=tf.string, trainable=True, name='hub_layer')
# From tf hub webpage: "The module takes a batch of sentences in a 1-D tensor of strings as input."
input_tensor = tf.keras.Input(shape=(), dtype=tf.string)
hub_tensor = hub_layer(input_tensor)
x = tf.keras.layers.Dense(16, activation='relu')(hub_tensor)#(x)
main_output = tf.keras.layers.Dense(units=4, activation='softmax', name='main_output')(x)
model = tf.keras.models.Model(inputs=[input_tensor], outputs=[main_output])
# This works as expected.
X_tensor = tf.constant(['Hello World', 'The Quick Brown Fox'])
model(X_tensor)
# This fails
X_ds = tf.data.Dataset.from_tensors(X_tensor)
X_ds.element_spec
model(X_ds)
My expectation was that the 1D tensor in the dataset would be automatically extracted and consumed by the model.
Error message:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
in
21 X_ds = tf.data.Dataset.from_tensors(X_tensor)
22 X_ds.element_spec
---> 23 model(X_ds)
24
25
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
966 with base_layer_utils.autocast_context_manager(
967 self._compute_dtype):
--> 968 outputs = self.call(cast_inputs, *args, **kwargs)
969 self._handle_activity_regularization(inputs, outputs)
970 self._set_mask_metadata(inputs, outputs, input_masks)
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/network.py in call(self, inputs, training, mask)
717 return self._run_internal_graph(
718 inputs, training=training, mask=mask,
--> 719 convert_kwargs_to_constants=base_layer_utils.call_context().saving)
720
721 def compute_output_shape(self, input_shape):
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/network.py in _run_internal_graph(self, inputs, training, mask, convert_kwargs_to_constants)
835 tensor_dict = {}
836 for x, y in zip(self.inputs, inputs):
--> 837 y = self._conform_to_reference_input(y, ref_input=x)
838 x_id = str(id(x))
839 tensor_dict[x_id] = [y] * self._tensor_usage_count[x_id]
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/network.py in _conform_to_reference_input(self, tensor, ref_input)
959 # Dtype handling.
960 if isinstance(ref_input, (ops.Tensor, composite_tensor.CompositeTensor)):
--> 961 tensor = math_ops.cast(tensor, dtype=ref_input.dtype)
962
963 return tensor
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
178 """Call target, and fall back on dispatchers if there is a TypeError."""
179 try:
--> 180 return target(*args, **kwargs)
181 except (TypeError, ValueError):
182 # Note: convert_to_eager_tensor currently raises a ValueError, not a
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in cast(x, dtype, name)
785 # allows some conversions that cast() can't do, e.g. casting numbers to
786 # strings.
--> 787 x = ops.convert_to_tensor(x, name="x")
788 if x.dtype.base_dtype != base_type:
789 x = gen_math_ops.cast(x, base_type, name=name)
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1339
1340 if ret is None:
-> 1341 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1342
1343 if ret is NotImplemented:
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
319 as_ref=False):
320 _ = as_ref
--> 321 return constant(v, dtype=dtype, name=name)
322
323
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
260 """
261 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 262 allow_broadcast=True)
263
264
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
268 ctx = context.context()
269 if ctx.executing_eagerly():
--> 270 t = convert_to_eager_tensor(value, ctx, dtype)
271 if shape is None:
272 return t
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
94 dtype = dtypes.as_dtype(dtype).as_datatype_enum
95 ctx.ensure_initialized()
---> 96 return ops.EagerTensor(value, ctx.device_name, dtype)
97
98
ValueError: Attempt to convert a value () with an unsupported type () to a Tensor.
The point of a dataset is to provide a sequence of tensors, like here:
all_data = tf.constant([['Hello', 'World'], ['Brown Fox', 'lazy dog']])
ds = tf.data.Dataset.from_tensor_slices(all_data)
for tensor in ds:
    print(tensor)
which outputs
tf.Tensor([b'Hello' b'World'], shape=(2,), dtype=string)
tf.Tensor([b'Brown Fox' b'lazy dog'], shape=(2,), dtype=string)
Instead of just printing tensor, you can compute with it:
for tensor in ds:
    print(hub_layer(tensor))
which outputs 2 tensors of shape (2,20) each.
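If the goal is to drive the whole model rather than a single layer, a minimal sketch (the labels and batch size are my own assumptions) is to build a dataset of (features, labels) pairs, batch it, and hand it to the Keras training API, which unpacks the tuples itself:
labels = tf.constant([0, 1])   # hypothetical integer labels for the 4-class output
train_ds = tf.data.Dataset.from_tensor_slices((X_tensor, labels)).batch(2)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(train_ds, epochs=1)  # Keras iterates the dataset and feeds batches to the model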
For more, see https://www.tensorflow.org/guide/data.

Tensor forest estimator ValueError when fitting the training part

Code:
from sklearn import cross_validation as cv
import numpy as np
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.tensor_forest.client import random_forest  # needed for TensorForestEstimator below
from tensorflow.contrib.tensor_forest.python import tensor_forest

X = np.array([[ 74., 166., 331., 161., 159., 181., 180.],
              [437., 427., 371., 361., 393., 465., 464.],
              [546., 564., 588., 595., 536., 537., 520.],
              [ 89.,  89.,  87.,  87., 108., 113., 111.],
              [ 75.,  90.,  74.,  89., 130., 140., 135.]])
Y = np.array([[51., 43., 29., 43., 43., 41., 42.],
              [22., 23., 26., 27., 25., 19., 19.],
              [ 7.,  7.,  5.,  5.,  9.,  8., 10.],
              [55., 54., 55., 53., 51., 51., 51.],
              [58., 57., 57., 58., 55., 55., 55.]])

train_X, test_X, train_Y, test_Y = cv.train_test_split(X, Y,
                                                        test_size=0.50, random_state=42)

def build_estimator():
    params = tensor_forest.ForestHParams(num_classes=7, num_features=7,
                                         num_trees=30, max_nodes=100)
    graph_builder_class = tensor_forest.RandomForestGraphs
    graph_builder_class = tensor_forest.TrainingLossForest
    return estimator.SKCompat(random_forest.TensorForestEstimator(
        params, graph_builder_class=graph_builder_class,
        model_dir=None))

est = build_estimator()
train_X = train_X.astype(dtype=np.float32)
train_Y = train_Y.astype(dtype=np.float32)
est = est.fit(x=train_X, y=train_Y, batch_size=100)
Both my input and output shapes are [number_of_samples, 7]. It runs perfectly with scikit-learn's random forest classifier. However, with tf.learn, I get the following error when fitting the estimator:
INFO:tensorflow:Constructing forest with params =
INFO:tensorflow:{'valid_leaf_threshold': 1, 'split_after_samples': 250, 'num_output_columns': 8, 'feature_bagging_fraction': 1.0, 'split_initializations_per_input': 1, 'bagged_features': None, 'min_split_samples': 5, 'max_nodes': 100, 'num_features': 7, 'num_trees': 30, 'num_splits_to_consider': 7, 'base_random_seed': 0, 'num_outputs': 1, 'dominate_fraction': 0.99, 'max_fertile_nodes': 50, 'bagged_num_features': 7, 'dominate_method': 'bootstrap', 'bagging_fraction': 1.0, 'regression': False, 'num_classes': 7}
ValueError                                Traceback (most recent call last)
in ()
----> 1 est = est.fit(x=train_X, y=train_Y, batch_size = 100)

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in fit(self, x, y, batch_size, steps, max_steps, monitors)
   1351             steps=steps,
   1352             max_steps=max_steps,
-> 1353             monitors=all_monitors)
   1354     return self
   1355

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/client/random_forest.pyc in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
    262     elif input is not None:
    263       self._estimator.fit(input_fn=input_fn, steps=steps, monitors=monitors,
--> 264                           max_steps=max_steps)
    265     else:
    266       raise ValueError('fit: Must provide either both x and y or input_fn.')

/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/deprecation.pyc in new_func(*args, **kwargs)
    278           _call_location(), decorator_utils.get_qualified_name(func),
    279           func.__module__, arg_name, date, instructions)
--> 280       return func(*args, **kwargs)
    281     new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
    282         func.__doc__, date, instructions)

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
    424       hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
    425
--> 426     loss = self._train_model(input_fn=input_fn, hooks=hooks)
    427     logging.info('Loss for final step: %s.', loss)
    428     return self

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in _train_model(self, input_fn, hooks)
    932       features, labels = input_fn()
    933       self._check_inputs(features, labels)
--> 934       model_fn_ops = self._call_legacy_get_train_ops(features, labels)
    935       ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
    936       all_hooks.extend([

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in _call_legacy_get_train_ops(self, features, labels)
   1001
   1002   def _call_legacy_get_train_ops(self, features, labels):
-> 1003     train_ops = self._get_train_ops(features, labels)
   1004     if isinstance(train_ops, model_fn_lib.ModelFnOps):  # Default signature
   1005       return train_ops

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in _get_train_ops(self, features, labels)
   1160       ModelFnOps object.
   1161     """
-> 1162     return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
   1163
   1164   def _get_eval_ops(self, features, labels, metrics):

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in _call_model_fn(self, features, labels, mode)
   1131     if 'model_dir' in model_fn_args:
   1132       kwargs['model_dir'] = self.model_dir
-> 1133     model_fn_results = self._model_fn(features, labels, **kwargs)
   1134
   1135     if isinstance(model_fn_results, model_fn_lib.ModelFnOps):

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/client/random_forest.pyc in _model_fn(features, labels)
    128     if labels is not None:
    129       training_loss = graph_builder.training_loss(
--> 130           features, labels, name=LOSS_NAME)
    131     training_graph = control_flow_ops.group(
    132         graph_builder.training_graph(

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc in training_loss(self, features, labels, name)
    559
    560   def training_loss(self, features, labels, name='training_loss'):
--> 561     return array_ops.identity(self._get_loss(features, labels), name=name)
    562
    563

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc in _get_loss(self, features, labels)
    548     self._loss = control_flow_ops.cond(
    549         self.average_size() > 0, _average_loss,
--> 550         lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))
    551
    552     return self._loss

/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.pyc in cond(pred, fn1, fn2, name)
   1757     context_t = CondContext(pred, pivot_1, branch=1)
   1758     context_t.Enter()
-> 1759     orig_res, res_t = context_t.BuildCondBranch(fn1)
   1760     context_t.ExitResult(res_t)
   1761     context_t.Exit()

/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.pyc in BuildCondBranch(self, fn)
   1658   def BuildCondBranch(self, fn):
   1659     """Add the subgraph defined by fn() to the graph."""
-> 1660     r = fn()
   1661     original_r = r
   1662     result = []

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc in _average_loss()
    544       probs = self.inference_graph(features)
    545       return math_ops.reduce_sum(self.loss_fn(
--> 546           probs, labels)) / math_ops.to_float(array_ops.shape(labels)[0])
    547
    548     self._loss = control_flow_ops.cond(

/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc in _loss(probs, targets)
    508   def _loss(probs, targets):
    509     if targets.get_shape().ndims > 1:
--> 510       targets = array_ops.squeeze(targets, squeeze_dims=[1])
    511     one_hot_labels = array_ops.one_hot(
    512         math_ops.to_int32(targets),

/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.pyc in squeeze(input, axis, name, squeeze_dims)
   2270   if np.isscalar(axis):
   2271     axis = [axis]
-> 2272   return gen_array_ops._squeeze(input, axis, name)
   2273
   2274

/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.pyc in _squeeze(input, squeeze_dims, name)
   3496   """
   3497   result = _op_def_lib.apply_op("Squeeze", input=input,
-> 3498                                 squeeze_dims=squeeze_dims, name=name)
   3499   return result
   3500

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.pyc in apply_op(self, op_type_name, name, **keywords)
    761         op = g.create_op(op_type_name, inputs, output_types, name=scope,
    762                          input_types=input_types, attrs=attr_protos,
--> 763                          op_def=op_def)
    764       if output_structure:
    765         outputs = op.outputs

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
   2395           original_op=self._default_original_op, op_def=op_def)
   2396     if compute_shapes:
-> 2397       set_shapes_for_outputs(ret)
   2398     self._add_op(ret)
   2399     self._record_op_seen_by_control_dependencies(ret)

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc in set_shapes_for_outputs(op)
   1755     shape_func = _call_cpp_shape_fn_and_require_op
   1756
-> 1757   shapes = shape_func(op)
   1758   if shapes is None:
   1759     raise RuntimeError(

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc in call_with_requiring(op)
   1705
   1706   def call_with_requiring(op):
-> 1707     return call_cpp_shape_fn(op, require_shape_fn=True)
   1708
   1709   _call_cpp_shape_fn_and_require_op = call_with_requiring

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/common_shapes.pyc in call_cpp_shape_fn(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
    608     res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
    609                                   input_tensors_as_shapes_needed,
--> 610                                   debug_python_shape_fn, require_shape_fn)
    611     if not isinstance(res, dict):
    612       # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).

/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/common_shapes.pyc in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
    673       missing_shape_fn = True
    674     else:
--> 675       raise ValueError(err.message)
    676
    677   if missing_shape_fn:

ValueError: Can not squeeze dim[1], expected a dimension of 1, got 7 for 'cond/Squeeze' (op: 'Squeeze') with input shapes: [?,7].
Right now, when using TrainingLossForest it assumes a classification problem. I will fix this internally, but for now you can get around it with something like:
from tensorflow.contrib.losses.python.losses import loss_ops

def _loss_fn(values, targets):
    return loss_ops.mean_squared_error(values, targets)

def _builder_class(params, **kwargs):
    return tensor_forest.TrainingLossForest(
        params, loss_fn=_loss_fn, **kwargs)

TensorForestEstimator(..., graph_builder_class=_builder_class)
Or use whatever loss function you want (MSE here as an example). Also, simply using graph_builder_class = tensor_forest.RandomForestGraphs should suffice, but the loss is then the number of nodes, so training stops when the forest stops growing or reaches max_nodes, which may not be what you want.
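For completeness, here is how the snippet above could be folded into the asker's build_estimator (my own adaptation, assuming the same imports as in the question plus random_forest from tensor_forest.client and a mean-squared-error loss):
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.tensor_forest.client import random_forest

def build_estimator():
    params = tensor_forest.ForestHParams(num_classes=7, num_features=7,
                                         num_trees=30, max_nodes=100)

    def _builder_class(params, **kwargs):
        # Regression-style loss instead of the default classification loss.
        return tensor_forest.TrainingLossForest(
            params, loss_fn=loss_ops.mean_squared_error, **kwargs)

    return estimator.SKCompat(random_forest.TensorForestEstimator(
        params, graph_builder_class=_builder_class, model_dir=None))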