When using Keras Tuner, there doesn't seem to be a way to skip a problematic combination of hyperparameters. For example, the number of filters in a Conv1D layer may not be compatible with every value of pool size in the following MaxPooling1D layer, and so lead to an error during model building. However, this may not be known before running the tuner. Once the tuner is running, such a combination raises an error that terminates the whole tuning process. Is there a way to skip any hyperparameter combinations that result in an error?
Sample code:
import IPython
import tensorflow as tf
import kerastuner as kt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Embedding, Conv1D, MaxPooling1D,
                                     GlobalMaxPooling1D, Dropout, Dense)

def model_builder(hp):
    model = Sequential()
    model.add(
        Embedding(
            input_dim=hp.Int(
                'vocab_size',
                min_value=4000,
                max_value=10000,
                step=1000,
                default=4000
            ),
            output_dim=hp.Choice(
                'embedding_dim',
                values=[32, 64, 128, 256],
                default=32
            ),
            input_length=hp.Int(
                'max_length',
                min_value=50,
                max_value=200,
                step=10,
                default=50
            )
        )
    )
    model.add(
        Conv1D(
            filters=hp.Choice(
                'num_filters_1',
                values=[32, 64],
                default=32
            ),
            kernel_size=hp.Choice(
                'kernel_size_1',
                values=[3, 5, 7, 9],
                default=7
            ),
            activation='relu'
        )
    )
    model.add(
        MaxPooling1D(
            pool_size=hp.Choice(
                'pool_size',
                values=[3, 5],
                default=5
            )
        )
    )
    model.add(
        Conv1D(
            filters=hp.Choice(
                'num_filters_2',
                values=[32, 64],
                default=32
            ),
            kernel_size=hp.Choice(
                'kernel_size_2',
                values=[3, 5, 7, 9],
                default=7
            ),
            activation='relu'
        )
    )
    model.add(
        GlobalMaxPooling1D()
    )
    model.add(
        Dropout(
            rate=hp.Float(
                'dropout_1',
                min_value=0.0,
                max_value=0.5,
                default=0.5,
                step=0.05
            )
        )
    )
    model.add(
        Dense(
            units=hp.Int(
                'units',
                min_value=10,
                max_value=100,
                step=10,
                default=10
            ),
            kernel_regularizer=tf.keras.regularizers.l2(
                hp.Float(
                    'regularizer_1',
                    min_value=1e-4,
                    max_value=1e-1,
                    sampling='LOG',
                    default=1e-2
                )
            ),
            activation='relu'
        )
    )
    model.add(
        Dropout(
            hp.Float(
                'dropout_2',
                min_value=0.0,
                max_value=0.5,
                default=0.5,
                step=0.05
            )
        )
    )
    model.add(
        Dense(
            1,
            kernel_regularizer=tf.keras.regularizers.l2(
                hp.Float(
                    'regularizer_2',
                    min_value=1e-4,
                    max_value=1e-1,
                    sampling='LOG',
                    default=1e-2
                )
            ),
            activation='sigmoid'
        )
    )
    model.compile(
        loss='binary_crossentropy',
        optimizer=hp.Choice(
            'optimizer',
            values=['rmsprop', 'adam', 'sgd']
        ),
        metrics=['accuracy']
    )
    return model

tuner = kt.Hyperband(
    model_builder,
    objective='val_accuracy',
    max_epochs=20,
    #factor=3,
    directory='my_dir',
    project_name='cec',
    seed=seed
)

class ClearTrainingOutput(tf.keras.callbacks.Callback):
    def on_train_end(self, *args, **kwargs):
        IPython.display.clear_output(wait=True)

tuner.search(
    X_train,
    y_train,
    epochs=20,
    validation_data=(X_test, y_test),
    callbacks=[ClearTrainingOutput()]
)
The error message:
Epoch 1/3
WARNING:tensorflow:Model was constructed with shape (None, 150) for input Tensor("embedding_input:0", shape=(None, 150), dtype=float32), but it was called on an input with incompatible shape (32, 50).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-62-16a1eae457d8> in <module>
3 IPython.display.clear_output(wait=True)
4
----> 5 tuner.search(
6 X_train,
7 y_train,
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/engine/base_tuner.py in search(self, *fit_args, **fit_kwargs)
128
129 self.on_trial_begin(trial)
--> 130 self.run_trial(trial, *fit_args, **fit_kwargs)
131 self.on_trial_end(trial)
132 self.on_search_end()
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/tuners/hyperband.py in run_trial(self, trial, *fit_args, **fit_kwargs)
385 fit_kwargs['epochs'] = hp.values['tuner/epochs']
386 fit_kwargs['initial_epoch'] = hp.values['tuner/initial_epoch']
--> 387 super(Hyperband, self).run_trial(trial, *fit_args, **fit_kwargs)
388
389 def _build_model(self, hp):
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/engine/multi_execution_tuner.py in run_trial(self, trial, *fit_args, **fit_kwargs)
94
95 model = self.hypermodel.build(trial.hyperparameters)
---> 96 history = model.fit(*fit_args, **copied_fit_kwargs)
97 for metric, epoch_values in history.history.items():
98 if self.oracle.objective.direction == 'min':
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
503 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
504 self._concrete_stateful_fn = (
--> 505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
506 *args, **kwds))
507
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2655 arg_names = base_arg_names + missing_arg_names
2656 graph_function = ConcreteFunction(
-> 2657 func_graph_module.func_graph_from_py_func(
2658 self._name,
2659 self._python_function,
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:951 run **
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
return fn(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:531 train_step **
y_pred = self(x, training=True)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:927 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/sequential.py:277 call
return super(Sequential, self).call(inputs, training=training, mask=mask)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:717 call
return self._run_internal_graph(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:888 _run_internal_graph
output_tensors = layer(computed_tensors, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:927 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/layers/convolutional.py:207 call
outputs = self._convolution_op(inputs, self.kernel)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:1106 __call__
return self.conv_op(inp, filter)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:638 __call__
return self.call(inp, filter)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:231 __call__
return self.conv_op(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:220 _conv1d
return conv1d(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:574 new_func
return func(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:574 new_func
return func(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:1655 conv1d
result = gen_nn_ops.conv2d(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py:965 conv2d
_, _, _op, _outputs = _op_def_library._apply_op_helper(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/op_def_library.py:742 _apply_op_helper
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:593 _create_op_internal
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:3319 _create_op_internal
ret = Operation(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1816 __init__
self._c_op = _create_c_op(self._graph, node_def, inputs,
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1657 _create_c_op
raise ValueError(str(e))
ValueError: Negative dimension size caused by subtracting 7 from 6 for '{{node sequential/conv1d_1/conv1d}} = Conv2D[T=DT_FLOAT, data_format="NHWC", dilations=[1, 1, 1, 1], explicit_paddings=[], padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true](sequential/conv1d_1/conv1d/ExpandDims, sequential/conv1d_1/conv1d/ExpandDims_1)' with input shapes: [32,1,6,32], [1,7,32,32].
I had been looking for a solution to this problem for a long time myself, and I finally found one. It is not very elegant, but it works. I'll leave it here in case it helps someone else.
The idea is to wrap the model construction in a try/except block and, if a ValueError occurs, build a heavily simplified fallback model instead. The important part is to give that fallback model a custom loss function that returns an impossibly large loss value, which a custom callback can then detect in order to stop training immediately (example code for convolutional neural networks attached below).
P.S. It would also be possible to make the custom loss function return NaN and catch it with the ready-made TerminateOnNaN() callback. But for some reason keras-tuner treats NaN as smaller than any number, and would therefore report NaN as the best val_loss.
from tensorflow import keras
from tensorflow.keras import layers, losses, metrics

def invalid_loss(y_true, y_pred):
    return keras.losses.BinaryCrossentropy()(y_true, y_pred) + 2000000

def invalid_model():
    model = keras.Sequential()
    model.add(layers.Input(input_shape))
    model.add(layers.Resizing(height=2, width=2))
    model.add(layers.Conv2D(filters=1,
                            kernel_size=2,
                            activation='relu'))
    model.add(layers.GlobalMaxPooling2D())
    model.add(layers.Dense(units=output_shape,
                           activation="sigmoid"))
    model.compile(optimizer="Adam",
                  loss=invalid_loss,
                  metrics=[metrics.BinaryAccuracy()])
    return model

def build_model(hp):
    try:
        model = keras.Sequential()
        model.add(layers.Input(input_shape))
        ...
        model.add(layers.Dense(units=output_shape,
                               activation=dense_activation))
        model.compile(optimizer="Adam",
                      loss=losses.BinaryCrossentropy(),
                      metrics=[metrics.BinaryAccuracy()])
    except ValueError:
        model = invalid_model()
    return model
And here is an example of a custom callback that stops training early, so that no time is wasted on an "invalid" model:
class EarlyStoppingByLoss(keras.callbacks.Callback):
    def __init__(self, max_loss):
        super().__init__()
        self.max_loss = max_loss

    def on_train_batch_end(self, batch, logs=None):
        if logs["loss"] >= self.max_loss:
            self.model.stop_training = True
You can also guard against oversized models (for example, when a Flatten() between the convolutional and fully connected layers blows up the parameter count):
from keras.utils.layer_utils import count_params

class EarlyStoppingByModelSize(keras.callbacks.Callback):
    def __init__(self, max_size):
        super().__init__()
        self.max_size = max_size

    def on_train_begin(self, logs=None):
        trainable_count = count_params(self.model.trainable_weights)
        if trainable_count > self.max_size:
            self.model.stop_training = True
Finally, add these callbacks to the list and pass them when running the search:
callbacks = []
callbacks.append(EarlyStoppingByLoss(900000))
callbacks.append(EarlyStoppingByModelSize(12000000))

tuner.search(x=x_train,
             y=y_train,
             epochs=epochs,
             validation_data=(x_test, y_test),
             callbacks=callbacks,
             verbose=1,
             batch_size=batch_size)
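A side note for readers on newer versions: the rewritten keras_tuner package (1.4 and later, as opposed to the old kerastuner import) has built-in support for failing a trial cleanly, which makes the fallback-model trick unnecessary there. A minimal sketch, assuming the newer API (the exact names are a version-dependent assumption, so check your installed release):

import keras_tuner as kt
import tensorflow as tf

def model_builder(hp):
    seq_len = hp.Int('max_length', min_value=50, max_value=200, step=10)
    kernel = hp.Choice('kernel_size_1', values=[3, 5, 7, 9])
    pool = hp.Choice('pool_size', values=[3, 5])

    # Compute the sequence length after the 'valid' conv and pooling
    # stages, and fail the trial up front if the second Conv1D would
    # receive a non-positive length, instead of crashing mid-search.
    after_conv1 = seq_len - kernel + 1
    after_pool = after_conv1 // pool
    if after_pool - kernel + 1 <= 0:
        raise kt.errors.FailedTrialError(
            f'kernel_size={kernel} and pool_size={pool} are incompatible '
            f'with max_length={seq_len}.')

    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(10000, 32, input_length=seq_len),
        tf.keras.layers.Conv1D(32, kernel, activation='relu'),
        tf.keras.layers.MaxPooling1D(pool),
        tf.keras.layers.Conv1D(32, kernel, activation='relu'),
        tf.keras.layers.GlobalMaxPooling1D(),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

Trials that raise this error are marked as failed and the search moves on; the tuner constructors also accept a max_consecutive_failed_trials argument to bound how many failures are tolerated in a row.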
Related
I'm very new to Tensorflow/Keras and deep learning, so my apologies in advance.
I'm creating a basic mixed convolutional neural net to classify images and metadata. I've created the following using the Keras Functional API:
import tensorflow as tf

# Define inputs
meta_inputs = tf.keras.Input(shape=(2065,))
img_inputs = tf.keras.Input(shape=(80,120,3,))
# Model 1
meta_layer1 = tf.keras.layers.Dense(64, activation='relu')(meta_inputs)
meta_output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(meta_layer1)
# Model 2
img_conv_layer1 = tf.keras.layers.Conv2D(32, kernel_size=(3,3), padding='same', activation='relu')(img_inputs)
img_pooling_layer1 = tf.keras.layers.MaxPooling2D()(img_conv_layer1)
img_conv_layer2 = tf.keras.layers.Conv2D(64, kernel_size=(3,3), padding='same', activation='relu')(img_pooling_layer1)
img_pooling_layer2 = tf.keras.layers.MaxPooling2D()(img_conv_layer2)
img_flatten_layer = tf.keras.layers.Flatten()(img_pooling_layer2)
img_dense_layer = tf.keras.layers.Dense(1024, activation='relu')(img_flatten_layer)
img_output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(img_dense_layer)
# Merge models
merged = tf.keras.layers.add([meta_output_layer, img_output_layer])
# Define functional model
model = tf.keras.Model(inputs=[meta_inputs, img_inputs], outputs=merged)
# Compile model
auc = tf.keras.metrics.AUC(name = 'auc')
model.compile('adam', loss='binary_crossentropy', metrics=[auc])
I then proceed to fit the model:
epochs = 15
history = model.fit([meta_train, img_train], y_train, epochs=epochs, batch_size=500, validation_data=([meta_test, img_test], y_test))
This produces an error, and I'm quite frankly not sure what to do with it:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-11-5ec0cf9ac1d1> in <module>
1 epochs = 15
----> 2 history = model.fit([meta_train, img_train], y_train, epochs=epochs, batch_size=500, validation_data=([meta_test, img_test], y_test))
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1098 _r=1):
1099 callbacks.on_train_batch_begin(step)
-> 1100 tmp_logs = self.train_function(iterator)
1101 if data_handler.should_sync:
1102 context.async_wait()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
826 tracing_count = self.experimental_get_tracing_count()
827 with trace.Trace(self._name) as tm:
--> 828 result = self._call(*args, **kwds)
829 compiler = "xla" if self._experimental_compile else "nonXla"
830 new_tracing_count = self.experimental_get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
886 # Lifting succeeded, so variables are initialized and we can run the
887 # stateless function.
--> 888 return self._stateless_fn(*args, **kwds)
889 else:
890 _, _, _, filtered_flat_args = \
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
2940 (graph_function,
2941 filtered_flat_args) = self._maybe_define_function(args, kwargs)
-> 2942 return graph_function._call_flat(
2943 filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
2944
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1916 and executing_eagerly):
1917 # No tape is watching; skip to running the function.
-> 1918 return self._build_call_outputs(self._inference_function.call(
1919 ctx, args, cancellation_manager=cancellation_manager))
1920 forward_backward = self._select_forward_and_backward_functions(
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in call(self, ctx, args, cancellation_manager)
553 with _InterpolateFunctionError(self):
554 if cancellation_manager is None:
--> 555 outputs = execute.execute(
556 str(self.signature.name),
557 num_outputs=self._num_outputs,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
InvalidArgumentError: assertion failed: [predictions must be <= 1] [Condition x <= y did not hold element-wise:] [x (model/add/add:0) = ] [[1.47704351][1.48876262][1.50816929]...] [y (Cast_4/x:0) = ] [1]
[[{{node assert_less_equal/Assert/AssertGuard/else/_11/assert_less_equal/Assert/AssertGuard/Assert}}]] [Op:__inference_train_function_1331]
Function call stack:
train_function
Being new to Tensorflow, Keras, and deep learning in general, I'm not even sure where to begin diagnosing the issue. I don't know what [predictions must be <= 1] [Condition x <= y did not hold element-wise:] is referring to, for example: y_train is an array of 1s and 0s, and should hold to that assertion. I've even tried reshaping it to a (N,1)-D array, to no effect.
Can anyone point me in the right direction on this?
AUC metrics need probabilities in [0, 1].
In your model this is not guaranteed, because the merged layer sums two sigmoid outputs, which can reach values up to 2. You can solve this, for example, by using an average instead of a sum:
merged = tf.keras.layers.Average()([meta_output_layer, img_output_layer])
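A related alternative (a sketch of my own, not part of the fix above): merge the penultimate feature layers of the two branches and put a single sigmoid head on top, so the output is a probability by construction:

# Reuses meta_layer1, img_dense_layer, meta_inputs and img_inputs from
# the model definition in the question; drop the two per-branch
# sigmoid output layers when doing this.
merged_features = tf.keras.layers.concatenate([meta_layer1, img_dense_layer])
merged = tf.keras.layers.Dense(1, activation='sigmoid')(merged_features)
model = tf.keras.Model(inputs=[meta_inputs, img_inputs], outputs=merged)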
I was trying to see if I can detect the Higgs boson using transfer learning, and I am unable to understand the error message.
I was wondering if it has something to do with the fact that the mentioned model was designed for computer vision, so it will only work for that (which I don't think is the case, but any input is appreciated).
Here's the code and the error message:
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub

m = hub.KerasLayer('https://tfhub.dev/google/on_device_vision/classifier/landmarks_classifier_oceania_antarctica_V1/1')
m = tf.keras.Sequential([
    m,
    tf.keras.layers.Dense(2, activation='softmax'),
])
m.compile(loss='binary_crossentropy',
          optimizer='adam', metrics=['accuracy', 'binary_accuracy'])
history = m.fit(ds_train, validation_data=ds_valid, epochs=12, steps_per_epoch=13)
Error:
ValueError Traceback (most recent call last)
<ipython-input-20-0c5a3b4a3d55> in <module>
11 m.compile(loss = 'binary_crossentropy',
12 optimizer = 'adam', metrics = ['accuracy','binary_accuracy'])
---> 13 history = m.fit(ds_train,validation_data=ds_valid, epochs =12 ,steps_per_epoch=13)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
504 self._concrete_stateful_fn = (
505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 506 *args, **kwds))
507
508 def invalid_creator_scope(*unused_args, **unused_kwds):
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2665 arg_names=arg_names,
2666 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2667 capture_by_value=self._capture_by_value),
2668 self._function_attributes,
2669 # Tell the ConcreteFunction to clean up its graph once it goes out of
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/opt/conda/lib/python3.7/site-packages/tensorflow_hub/keras_layer.py:222 call *
result = f()
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py:1605 __call__ **
return self._call_impl(args, kwargs)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py:1645 _call_impl
return self._call_flat(args, self.captured_inputs, cancellation_manager)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py:1730 _call_flat
arg.shape))
ValueError: The argument 'images' (value Tensor("IteratorGetNext:0", shape=(None, 28), dtype=float32, device=/job:worker/replica:0/task:0/device:CPU:0)) is not compatible with the shape this function was traced with. Expected shape (None, 321, 321, 3), but got shape (None, 28).
If you called get_concrete_function, you may need to pass a tf.TensorSpec(..., shape=...) with a less specific shape, having None on axes which can vary.
Any effort is appreciated
Thanks a lot
As per the official documentation of the Landmarks Classifier:
Inputs are expected to be 3-channel RGB color images of size 321 x
321, scaled to [0, 1].
But your dataset's file format is tfrecord.
When we use transfer learning and want to reuse models, whether from TF Hub or from tf.keras.applications, our data must be in the predefined format stated in the documentation.
So please ensure that your dataset consists of images, and resize each image array to (321, 321, 3) for the TF Hub module to work.
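If your data really are decoded images, a minimal preprocessing sketch along those lines (the ds_train/ds_valid names and the (image, label) element structure are assumptions, adjust to your pipeline):

import tensorflow as tf

def preprocess(image, label):
    # Resize to the spatial size the module expects and scale to [0, 1].
    image = tf.image.resize(image, [321, 321])
    image = tf.cast(image, tf.float32) / 255.0
    return image, label

ds_train = ds_train.map(preprocess).batch(32)
ds_valid = ds_valid.map(preprocess).batch(32)

Note that the traceback's shape (None, 28) suggests tabular features rather than images, in which case an image-classification module is not applicable at all.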
I am new to machine learning and deep learning. I followed a tutorial for a convolutional neural network, but the tutorial was for binary classification. I have now tried my own version on a categorical dataset with a few things changed and am getting this error.
My code:
import tensorflow as tf
from keras.preprocessing import image

train_datagen = image.ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
training_set = train_datagen.flow_from_directory(
    'datasets/training_data/',
    target_size=(64,64),
    batch_size=32,
    class_mode='categorical'
)

test_datagen = image.ImageDataGenerator(
    rescale=1./255,
)
test_set = test_datagen.flow_from_directory(
    'datasets/testing_data/',
    target_size=(64,64),
    batch_size=32,
    class_mode='categorical'
)

cnn = tf.keras.models.Sequential()
cnn.add(tf.keras.layers.Conv2D(
    filters = 32,
    kernel_size = 3,
    activation = 'relu',
    input_shape = [64,64,3]
))
cnn.add(tf.keras.layers.MaxPool2D(
    pool_size = 2,
    strides = 2
))
cnn.add(tf.keras.layers.Conv2D(
    filters = 32,
    kernel_size = 3,
    activation = 'relu'
))
cnn.add(tf.keras.layers.MaxPool2D(
    pool_size = 2,
    strides = 2
))
cnn.add(tf.keras.layers.Flatten())
cnn.add(tf.keras.layers.Dense(
    units = 128,
    activation = 'relu'
))
cnn.add(tf.keras.layers.Dense(
    units = 1,
    activation = 'softmax'
))

cnn.compile(
    optimizer='adam',
    loss = 'categorical_crossentropy',
    metrics=['accuracy']
)
cnn.fit(x = training_set,
        validation_data = test_set,
        epochs = 25
)
Error:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-17-52da1b2b0cd1> in <module>
----> 1 cnn.fit(x = training_set,
2 validation_data = test_set,
3 epochs = 25)
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
805 # In this case we have created variables on the first call, so we run the
806 # defunned version which is guaranteed to never create variables.
--> 807 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
808 elif self._stateful_fn is not None:
809 # Release the lock early so that multiple threads can perform the call
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2827 with self._lock:
2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
2831 #property
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _filtered_call(self, args, kwargs, cancellation_manager)
1841 `args` and `kwargs`.
1842 """
-> 1843 return self._call_flat(
1844 [t for t in nest.flatten((args, kwargs), expand_composites=True)
1845 if isinstance(t, (ops.Tensor,
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1921 and executing_eagerly):
1922 # No tape is watching; skip to running the function.
-> 1923 return self._build_call_outputs(self._inference_function.call(
1924 ctx, args, cancellation_manager=cancellation_manager))
1925 forward_backward = self._select_forward_and_backward_functions(
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
543 with _InterpolateFunctionError(self):
544 if cancellation_manager is None:
--> 545 outputs = execute.execute(
546 str(self.signature.name),
547 num_outputs=self._num_outputs,
~\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
InvalidArgumentError: Matrix size-incompatible: In[0]: [32,6], In[1]: [128,1]
[[node gradient_tape/sequential/dense_1/MatMul (defined at <ipython-input-16-c714df782bf1>:1) ]] [Op:__inference_train_function_798]
Function call stack:
train_function
If anyone can help me find the solution, that would be awesome. Thank you.
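One likely fix, judging only from the traceback (In[0]: [32,6] implies batches of 32 samples with six one-hot-encoded classes): with class_mode='categorical' and categorical_crossentropy, the final layer needs one unit per class, and softmax over a single unit is degenerate (it always outputs 1, so Dense(1, activation='softmax') cannot learn anything). A sketch of the corrected output layer:

cnn.add(tf.keras.layers.Dense(
    units = 6,          # one unit per class; 6 is inferred from the traceback
    activation = 'softmax'
))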
For a specific problem in reinforcement learning (inspired by this paper), I'm using an RNN with an LSTM cell which is fed data of shape (batch_size, time_steps, features) = (1,1,1) for L data points, after which a "cycle" is over. I'm using lstm.stateful = True, and after L feeds to the network I call lstm.reset_states().
Between one cycle and the next, just after calling lstm.reset_states(), I'd like to evaluate the output of the network on input data of shape (batch_size, time_steps, features) = (L,1,1), and then continue using the RNN with batch_size = 1 inputs again.
Furthermore, I want the code to be as optimized as possible, so I'm trying to use AutoGraph via the @tf.function decorator.
The problem is that I encounter an error, which can be recreated with the following example (notice that if @tf.function is removed, everything works, and I don't understand why):
import tensorflow as tf
import numpy as np

class Actor(tf.keras.Model):
    def __init__(self):
        super(Actor, self).__init__()
        self.lstm = tf.keras.layers.LSTM(5, return_sequences=True, stateful=True,
                                         input_shape=(None, None, 1))
    def call(self, inputs):
        feat = self.lstm(inputs)
        return feat

actor = Actor()

@tf.function
def g(actor):
    context1 = tf.reshape(np.array([0.]*10), (10,1,1))
    actor(context1)
    actor.reset_states()
    actor.lstm.stateful = False
    context = tf.reshape(np.array([0.]), (1,1,1))
    actor(context)

g(actor)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-28-4487772bee64> in <module>
23 actor(context)
24
---> 25 g(actor)
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
504 self._concrete_stateful_fn = (
505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 506 *args, **kwds))
507
508 def invalid_creator_scope(*unused_args, **unused_kwds):
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2665 arg_names=arg_names,
2666 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2667 capture_by_value=self._capture_by_value),
2668 self._function_attributes,
2669 # Tell the ConcreteFunction to clean up its graph once it goes out of
~/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
<ipython-input-28-4487772bee64>:23 g *
actor(context)
<ipython-input-28-4487772bee64>:11 call *
feat= self.lstm(inputs)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/layers/recurrent.py:654 __call__ **
return super(RNN, self).__call__(inputs, **kwargs)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py:886 __call__
self.name)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/input_spec.py:227 assert_input_compatibility
', found shape=' + str(shape))
ValueError: Input 0 is incompatible with layer lstm_7: expected shape=(10, None, 1), found shape=[1, 1, 1]
If anyone is interested, I found an answer in the following posts, and the workaround that worked for me was the following:
import tensorflow as tf
import numpy as np

class Actor(tf.keras.Model):
    def __init__(self):
        super(Actor, self).__init__()
        self.lstm = tf.keras.layers.LSTM(5, return_sequences=True, stateful=True,
                                         input_shape=(1, 1))
    def call(self, inputs):
        feat = self.lstm(inputs)
        return feat
    def reset_states_workaround(self, new_batch_size=1):
        # Rebuild the LSTM state variables and input spec for the new batch size.
        self.lstm.states = [tf.Variable(tf.random.uniform((new_batch_size, 5))),
                            tf.Variable(tf.random.uniform((new_batch_size, 5)))]
        self.lstm.input_spec = [tf.keras.layers.InputSpec(shape=(new_batch_size, None, 1), ndim=3)]
And then, between two different @tf.function-decorated calls, I do:
actor = Actor()

@tf.function
def g(actor):
    context1 = tf.reshape(np.array([0.]*10), (10,1,1))
    preds = actor(context1)
    return preds

g(actor)

actor.reset_states_workaround(new_batch_size=1)

@tf.function
def g2(actor):
    context1 = tf.reshape(np.array([0.]*1), (1,1,1))
    preds = actor(context1)
    return preds

g2(actor)
A problem arises when calling actor.reset_states_workaround(new_batch_size=1) inside a @tf.function, though: ValueError: tf.function-decorated function tried to create variables on non-first call. That is why I call it outside.
I cannot understand how to use a tensorflow dataset as input for my model. I have an X of shape (n_sample, max_sentence_size) and a y of shape (n_sample), but I cannot match the dimensions, and I am not sure what tensorflow does internally.
Below you can find a reproducible example with zero-filled matrices; my real data is not empty, it is an integer representation of text.
import numpy as np
import tensorflow as tf

X_train = np.zeros((16, 6760))
y_train = np.zeros((16))
train = tf.data.Dataset.from_tensor_slices((X_train, y_train))

# Prepare for tensorflow
BUFFER_SIZE = 10000
BATCH_SIZE = 64
VOCAB_SIZE = 5354

train = train.shuffle(BUFFER_SIZE)#.batch(BATCH_SIZE)

# Select index of interest in text
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=VOCAB_SIZE, output_dim=64, mask_zero=False),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(VOCAB_SIZE, activation='softmax'),
])

model.compile(loss="sparse_categorical_crossentropy",
              # loss=tf.keras.losses.MeanAbsoluteError(),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['sparse_categorical_accuracy'])

history = model.fit(train, epochs=3)
ValueError Traceback (most recent call last)
<ipython-input-74-3a160a5713dd> in <module>
----> 1 history = model.fit(train, epochs=3,
2 # validation_data=test,
3 # validation_steps=30
4 )
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
817 max_queue_size=max_queue_size,
818 workers=workers,
--> 819 use_multiprocessing=use_multiprocessing)
820
821 def evaluate(self,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
340 mode=ModeKeys.TRAIN,
341 training_context=training_context,
--> 342 total_epochs=epochs)
343 cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
344
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
126 step=step, mode=mode, size=current_batch_size) as batch_logs:
127 try:
--> 128 batch_outs = execution_function(iterator)
129 except (StopIteration, errors.OutOfRangeError):
130 # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in execution_function(input_fn)
96 # `numpy` translates Tensors to values in Eager mode.
97 return nest.map_structure(_non_none_constant_value,
---> 98 distributed_function(input_fn))
99
100 return execution_function
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
566 xla_context.Exit()
567 else:
--> 568 result = self._call(*args, **kwds)
569
570 if tracing_count == self._get_tracing_count():
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
613 # This is the first call of __call__, so we have to initialize.
614 initializers = []
--> 615 self._initialize(args, kwds, add_initializers_to=initializers)
616 finally:
617 # At this point we know that the initialization is complete (or less
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
495 self._concrete_stateful_fn = (
496 self._stateful_fn._get_concrete_function_internal_garbage_collected(
# pylint: disable=protected-access
--> 497 *args, **kwds))
498
499 def invalid_creator_scope(*unused_args, **unused_kwds):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args,
**kwargs)
2387 args, kwargs = None, None
2388 with self._lock:
-> 2389 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2390 return graph_function
2391
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2701
2702 self._function_cache.missed.add(call_context_key)
-> 2703 graph_function = self._create_graph_function(args, kwargs)
2704 self._function_cache.primary[cache_key] = graph_function
2705 return graph_function, args, kwargs
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2591 arg_names=arg_names,
2592 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2593 capture_by_value=self._capture_by_value),
2594 self._function_attributes,
2595 # Tell the ConcreteFunction to clean up its graph once it goes out of
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
976 converted_func)
977
--> 978 func_outputs = python_func(*func_args, **func_kwargs)
979
980 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
437 # __wrapped__ allows AutoGraph to swap in a converted function. We give
438 # the function a weak reference to itself to avoid a reference cycle.
--> 439 return weak_wrapped_fn().__wrapped__(*args, **kwds)
440 weak_wrapped_fn = weakref.ref(wrapped_fn)
441
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in distributed_function(input_iterator)
83 args = _prepare_feed_values(model, input_iterator, mode, strategy)
84 outputs = strategy.experimental_run_v2(
---> 85 per_replica_function, args=args)
86 # Out of PerReplica outputs reduce or pick values to return.
87 all_outputs = dist_utils.unwrap_output_dict(
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
761 fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
762 convert_by_default=False)
--> 763 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
764
765 def reduce(self, reduce_op, value, axis):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
1817 kwargs = {}
1818 with self._container_strategy().scope():
-> 1819 return self._call_for_each_replica(fn, args, kwargs)
1820
1821 def _call_for_each_replica(self, fn, args, kwargs):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
2162 self._container_strategy(),
2163 replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
-> 2164 return fn(*args, **kwargs)
2165
2166 def _reduce_to(self, reduce_op, value, destinations):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/autograph/impl/api.py in wrapper(*args, **kwargs)
290 def wrapper(*args, **kwargs):
291 with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292 return func(*args, **kwargs)
293
294 if inspect.isfunction(func) or inspect.ismethod(func):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in train_on_batch(model, x, y, sample_weight, class_weight, reset_metrics, standalone)
431 y,
432 sample_weights=sample_weights,
--> 433 output_loss_metrics=model._output_loss_metrics)
434
435 if reset_metrics:
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in train_on_batch(model, inputs, targets, sample_weights, output_loss_metrics)
310 sample_weights=sample_weights,
311 training=True,
--> 312 output_loss_metrics=output_loss_metrics))
313 if not isinstance(outs, list):
314 outs = [outs]
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _process_single_batch(model, inputs, targets, output_loss_metrics, sample_weights, training)
251 output_loss_metrics=output_loss_metrics,
252 sample_weights=sample_weights,
--> 253 training=training))
254 if total_loss is None:
255 raise ValueError('The model cannot be run '
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _model_loss(model, inputs, targets, output_loss_metrics, sample_weights, training)
165
166 if hasattr(loss_fn, 'reduction'):
--> 167 per_sample_losses = loss_fn.call(targets[i], outs[i])
168 weighted_losses = losses_utils.compute_weighted_loss(
169 per_sample_losses,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/losses.py in call(self, y_true, y_pred)
219 y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
220 y_pred, y_true)
--> 221 return self.fn(y_true, y_pred, **self._fn_kwargs)
222
223 def get_config(self):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/losses.py in sparse_categorical_crossentropy(y_true, y_pred, from_logits, axis)
976 def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
977 return K.sparse_categorical_crossentropy(
--> 978 y_true, y_pred, from_logits=from_logits, axis=axis)
979
980
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/backend.py in sparse_categorical_crossentropy(target, output, from_logits, axis)
4571 with get_graph().as_default():
4572 res = nn.sparse_softmax_cross_entropy_with_logits_v2(
-> 4573 labels=target, logits=output)
4574 else:
4575 res = nn.sparse_softmax_cross_entropy_with_logits_v2(
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py in sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name)
3535 """
3536 return sparse_softmax_cross_entropy_with_logits(
-> 3537 labels=labels, logits=logits, name=name)
3538
3539
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py in sparse_softmax_cross_entropy_with_logits(_sentinel, labels, logits, name)
3451 "should equal the shape of logits except for the last "
3452 "dimension (received %s)." % (labels_static_shape,
-> 3453 logits.get_shape()))
3454 # Check if no reshapes are required.
3455 if logits.get_shape().ndims == 2:
ValueError: Shape mismatch: The shape of labels (received (1,)) should equal the shape of logits except for the last dimension (received (6760, 5354)).
This works for me in Tensorflow 2.0:
import numpy as np
import tensorflow as tf

# Prepare for tensorflow
BUFFER_SIZE = 10000
BATCH_SIZE = 64
VOCAB_SIZE = 5354

X_train = np.zeros((16, 6760))
y_train = np.zeros((16, 1))  # This is changed

train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train = train.shuffle(BUFFER_SIZE).batch(8)  # This is changed

# Select index of interest in text
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=VOCAB_SIZE, output_dim=64, input_length=6760, mask_zero=False),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(VOCAB_SIZE, activation='softmax'),
])
print(model.summary())

model.compile(loss="sparse_categorical_crossentropy",
              # loss=tf.keras.losses.MeanAbsoluteError(),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['sparse_categorical_accuracy'])

history = model.fit(train, epochs=3)
For those with the same problem: I did not immediately understand rajesh's change. The issue was the absence of a batch dimension. Without .batch(), each dataset element is fed to the model as-is, so the (6760,)-shaped sentence was interpreted as a batch of 6760 single tokens, which is where the (6760, 5354) logits in the error message come from. I replaced:
train = train.shuffle(BUFFER_SIZE) #.batch(BATCH_SIZE)
with (the .batch call uncommented):
train = train.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
and it worked.