Related
When using Keras Tuner, there doesn't seem to be a way to allow the skipping of a problematic combination of hyperparams. For example, the number of filters in a Conv1D layer may not be compatible with all values of pool size in the following MaxPooling1D layer and thus lead to an error in model building. However, this may not be known before running the tuner. Once the tuner is run, this will lead to an error that will terminate the whole tuning process. Is there a way to skip any hyperparam combinations that result in an error?
Sample code:
import IPython
import tensorflow as tf
import kerastuner as kt
from tensorflow.keras import Sequential
from tensorflow.keras.layers import (Conv1D, Dense, Dropout, Embedding,
                                     GlobalMaxPooling1D, MaxPooling1D)

def model_builder(hp):
    model = Sequential()
    model.add(Embedding(
        input_dim=hp.Int('vocab_size', min_value=4000, max_value=10000, step=1000, default=4000),
        output_dim=hp.Choice('embedding_dim', values=[32, 64, 128, 256], default=32),
        input_length=hp.Int('max_length', min_value=50, max_value=200, step=10, default=50)
    ))
    model.add(Conv1D(
        filters=hp.Choice('num_filters_1', values=[32, 64], default=32),
        kernel_size=hp.Choice('kernel_size_1', values=[3, 5, 7, 9], default=7),
        activation='relu'
    ))
    model.add(MaxPooling1D(
        pool_size=hp.Choice('pool_size', values=[3, 5], default=5)
    ))
    model.add(Conv1D(
        filters=hp.Choice('num_filters_2', values=[32, 64], default=32),
        kernel_size=hp.Choice('kernel_size_2', values=[3, 5, 7, 9], default=7),
        activation='relu'
    ))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(
        rate=hp.Float('dropout_1', min_value=0.0, max_value=0.5, default=0.5, step=0.05)
    ))
    model.add(Dense(
        units=hp.Int('units', min_value=10, max_value=100, step=10, default=10),
        kernel_regularizer=tf.keras.regularizers.l2(
            hp.Float('regularizer_1', min_value=1e-4, max_value=1e-1, sampling='LOG', default=1e-2)
        ),
        activation='relu'
    ))
    model.add(Dropout(
        hp.Float('dropout_2', min_value=0.0, max_value=0.5, default=0.5, step=0.05)
    ))
    model.add(Dense(
        1,
        kernel_regularizer=tf.keras.regularizers.l2(
            hp.Float('regularizer_2', min_value=1e-4, max_value=1e-1, sampling='LOG', default=1e-2)
        ),
        activation='sigmoid'
    ))
    model.compile(
        loss='binary_crossentropy',
        optimizer=hp.Choice('optimizer', values=['rmsprop', 'adam', 'sgd']),
        metrics=['accuracy']
    )
    return model

tuner = kt.Hyperband(
    model_builder,
    objective='val_accuracy',
    max_epochs=20,
    # factor=3,
    directory='my_dir',
    project_name='cec',
    seed=seed
)

class ClearTrainingOutput(tf.keras.callbacks.Callback):
    def on_train_end(*args, **kwargs):
        IPython.display.clear_output(wait=True)

tuner.search(
    X_train,
    y_train,
    epochs=20,
    validation_data=(X_test, y_test),
    callbacks=[ClearTrainingOutput()]
)
The error message:
Epoch 1/3
WARNING:tensorflow:Model was constructed with shape (None, 150) for input Tensor("embedding_input:0", shape=(None, 150), dtype=float32), but it was called on an input with incompatible shape (32, 50).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-62-16a1eae457d8> in <module>
3 IPython.display.clear_output(wait=True)
4
----> 5 tuner.search(
6 X_train,
7 y_train,
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/engine/base_tuner.py in search(self, *fit_args, **fit_kwargs)
128
129 self.on_trial_begin(trial)
--> 130 self.run_trial(trial, *fit_args, **fit_kwargs)
131 self.on_trial_end(trial)
132 self.on_search_end()
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/tuners/hyperband.py in run_trial(self, trial, *fit_args, **fit_kwargs)
385 fit_kwargs['epochs'] = hp.values['tuner/epochs']
386 fit_kwargs['initial_epoch'] = hp.values['tuner/initial_epoch']
--> 387 super(Hyperband, self).run_trial(trial, *fit_args, **fit_kwargs)
388
389 def _build_model(self, hp):
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/engine/multi_execution_tuner.py in run_trial(self, trial, *fit_args, **fit_kwargs)
94
95 model = self.hypermodel.build(trial.hyperparameters)
---> 96 history = model.fit(*fit_args, **copied_fit_kwargs)
97 for metric, epoch_values in history.history.items():
98 if self.oracle.objective.direction == 'min':
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
503 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
504 self._concrete_stateful_fn = (
--> 505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
506 *args, **kwds))
507
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2655 arg_names = base_arg_names + missing_arg_names
2656 graph_function = ConcreteFunction(
-> 2657 func_graph_module.func_graph_from_py_func(
2658 self._name,
2659 self._python_function,
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:951 run **
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
return fn(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:531 train_step **
y_pred = self(x, training=True)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:927 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/sequential.py:277 call
return super(Sequential, self).call(inputs, training=training, mask=mask)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:717 call
return self._run_internal_graph(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:888 _run_internal_graph
output_tensors = layer(computed_tensors, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:927 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/layers/convolutional.py:207 call
outputs = self._convolution_op(inputs, self.kernel)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:1106 __call__
return self.conv_op(inp, filter)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:638 __call__
return self.call(inp, filter)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:231 __call__
return self.conv_op(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:220 _conv1d
return conv1d(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:574 new_func
return func(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:574 new_func
return func(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:1655 conv1d
result = gen_nn_ops.conv2d(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py:965 conv2d
_, _, _op, _outputs = _op_def_library._apply_op_helper(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/op_def_library.py:742 _apply_op_helper
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:593 _create_op_internal
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:3319 _create_op_internal
ret = Operation(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1816 __init__
self._c_op = _create_c_op(self._graph, node_def, inputs,
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1657 _create_c_op
raise ValueError(str(e))
ValueError: Negative dimension size caused by subtracting 7 from 6 for '{{node sequential/conv1d_1/conv1d}} = Conv2D[T=DT_FLOAT, data_format="NHWC", dilations=[1, 1, 1, 1], explicit_paddings=[], padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true](sequential/conv1d_1/conv1d/ExpandDims, sequential/conv1d_1/conv1d/ExpandDims_1)' with input shapes: [32,1,6,32], [1,7,32,32].
I was looking for a solution to this problem for a long time myself and finally found one. It is not very elegant, but it works. I'll leave it here; maybe it will help someone else.
The idea is to wrap the model construction in a try-except block and, if a ValueError occurs, build a highly simplified fallback model instead. The important part is to give that fallback model a custom loss function that returns an absurdly large loss value, which a custom callback can then detect in order to stop training early (example code for convolutional neural networks below).
P.S. You could also make the custom loss function return NaN and catch it with the ready-made TerminateOnNaN() callback. But for some reason keras-tuner treats NaN as smaller than any number, so it would report NaN as the best val_loss.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, losses, metrics

def invalid_loss(y_true, y_pred):
    return keras.losses.BinaryCrossentropy()(y_true, y_pred) + 2000000

def invalid_model():
    model = keras.Sequential()
    model.add(layers.Input((input_shape)))
    model.add(layers.Resizing(height=2, width=2))
    model.add(layers.Conv2D(filters=1,
                            kernel_size=2,
                            activation='relu',
                            ))
    model.add(layers.GlobalMaxPooling2D())
    model.add(layers.Dense(units=output_shape,
                           activation="sigmoid",
                           ))
    model.compile(optimizer="Adam",
                  loss=invalid_loss,
                  metrics=[metrics.BinaryAccuracy()])
    return model

def build_model(hp):
    try:
        model = keras.Sequential()
        model.add(layers.Input((input_shape)))
        ...
        model.add(layers.Dense(units=output_shape,
                               activation=dense_activation))
        model.compile(optimizer="Adam",
                      loss=losses.BinaryCrossentropy(),
                      metrics=[metrics.BinaryAccuracy()])
    except ValueError:
        model = invalid_model()
    return model
And here is an example of a custom callback that stops training so as not to waste time on an "invalid" model:
class EarlyStoppingByLoss(keras.callbacks.Callback):
    def __init__(self, max_loss):
        self.max_loss = max_loss

    def on_train_batch_end(self, batch, logs=None):
        if logs["loss"] >= self.max_loss:
            self.model.stop_training = True
You can also control oversized models (for example, if you use Flatten() when switching from convolutional layers to fully connected ones)
from keras.utils.layer_utils import count_params

class EarlyStoppingByModelSize(keras.callbacks.Callback):
    def __init__(self, max_size):
        self.max_size = max_size

    def on_train_begin(self, logs=None):
        trainable_count = count_params(self.model.trainable_weights)
        if trainable_count > self.max_size:
            self.model.stop_training = True
And, accordingly, at the end we add these callbacks to the list and use them when training the model
callbacks = []
callbacks.append(EarlyStoppingByLoss(900000))
callbacks.append(EarlyStoppingByModelSize(12000000))

tuner.search(x=x_train,
             y=y_train,
             epochs=epochs,
             validation_data=(x_test, y_test),
             callbacks=callbacks,
             verbose=1,
             batch_size=batch_size)
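As an aside: if upgrading is an option, recent releases of KerasTuner (the keras_tuner package, version 1.3 or later if I remember correctly) added first-class handling of failed trials, so a crashing hyperparameter combination no longer has to kill the whole search. A minimal sketch, assuming such a version is installed; the model_builder from the question is reused unchanged, and the argument values are only illustrative:
import keras_tuner as kt

tuner = kt.Hyperband(
    model_builder,
    objective='val_accuracy',
    max_epochs=20,
    directory='my_dir',
    project_name='cec',
    max_retries_per_trial=0,           # don't retry a combination that cannot build/train
    max_consecutive_failed_trials=20,  # only abort the search after many failures in a row
)

# To skip a combination deliberately instead of letting it crash, you can also
# raise kt.errors.FailedTrialError("reason") from inside the model-building function.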
For a specific problem in reinforcement learning (inspired by this paper), I'm using an RNN with an LSTM cell that is fed data of shape (batch_size, time_steps, features) = (1, 1, 1) for L data points, after which a "cycle" is over. I'm using lstm.stateful = True, and after L feeds to the network I call lstm.reset_states().
Between one cycle and the next, just after calling lstm.reset_states(), I'd like to evaluate the output of the network on input data of shape (batch_size, time_steps, features) = (L, 1, 1), and then continue using the RNN again with input of batch_size = 1.
Furthermore, I want the code to be as optimized as possible, and for this I'm trying to use AutoGraph via the @tf.function decorator.
The problem is that I encounter an error that can be recreated with the following example (notice that if @tf.function is removed, everything works, and I don't understand why):
import tensorflow as tf
import numpy as np

class Actor(tf.keras.Model):
    def __init__(self):
        super(Actor, self).__init__()
        self.lstm = tf.keras.layers.LSTM(5, return_sequences=True, stateful=True, input_shape=(None, None, 1))#, input_shape=(None,None,1))

    def call(self, inputs):
        feat = self.lstm(inputs)
        return feat

actor = Actor()

@tf.function
def g(actor):
    context1 = tf.reshape(np.array([0.]*10), (10, 1, 1))
    actor(context1)
    actor.reset_states()
    actor.lstm.stateful = False
    context = tf.reshape(np.array([0.]), (1, 1, 1))
    actor(context)

g(actor)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-28-4487772bee64> in <module>
23 actor(context)
24
---> 25 g(actor)
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
504 self._concrete_stateful_fn = (
505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 506 *args, **kwds))
507
508 def invalid_creator_scope(*unused_args, **unused_kwds):
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2665 arg_names=arg_names,
2666 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2667 capture_by_value=self._capture_by_value),
2668 self._function_attributes,
2669 # Tell the ConcreteFunction to clean up its graph once it goes out of
~/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
<ipython-input-28-4487772bee64>:23 g *
actor(context)
<ipython-input-28-4487772bee64>:11 call *
feat= self.lstm(inputs)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/layers/recurrent.py:654 __call__ **
return super(RNN, self).__call__(inputs, **kwargs)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py:886 __call__
self.name)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/input_spec.py:227 assert_input_compatibility
', found shape=' + str(shape))
ValueError: Input 0 is incompatible with layer lstm_7: expected shape=(10, None, 1), found shape=[1, 1, 1]
If anyone is interested, I found an answer from the following posts, and the workaround that worked for me was the following:
import tensorflow as tf
import numpy as np

class Actor(tf.keras.Model):
    def __init__(self):
        super(Actor, self).__init__()
        self.lstm = tf.keras.layers.LSTM(5, return_sequences=True, stateful=True, input_shape=(1, 1))#, input_shape=(None,None,1))

    def call(self, inputs):
        feat = self.lstm(inputs)
        return feat

    def reset_states_workaround(self, new_batch_size=1):
        self.lstm.states = [tf.Variable(tf.random.uniform((new_batch_size, 5))), tf.Variable(tf.random.uniform((new_batch_size, 5)))]
        self.lstm.input_spec = [tf.keras.layers.InputSpec(shape=(new_batch_size, None, 1), ndim=3)]
And then, between two different @tf.function-decorated calls, I do:
actor = Actor()

@tf.function
def g(actor):
    context1 = tf.reshape(np.array([0.]*10), (10, 1, 1))
    preds = actor(context1)
    return preds

g(actor)

actor.reset_states_workaround(new_batch_size=1)

@tf.function
def g2(actor):
    context1 = tf.reshape(np.array([0.]*1), (1, 1, 1))
    preds = actor(context1)
    return preds

g2(actor)
A problem arises, though, when using actor.reset_states_workaround(new_batch_size=1) inside @tf.function: ValueError: tf.function-decorated function tried to create variables on non-first call. That's why I'm calling it outside.
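For what it's worth, that ValueError is the general tf.function rule that variables may only be created during the first trace, and reset_states_workaround creates brand-new tf.Variable objects every time it runs. A tiny stand-alone sketch of my own (not from the question) that reproduces the same message:
import tensorflow as tf

@tf.function
def make_var(x):
    # A brand-new tf.Variable is created on every trace. The first trace is allowed,
    # but any later retrace (here triggered by a new Python argument value) fails with
    # "tf.function-decorated function tried to create variables on non-first call."
    v = tf.Variable(1.0)
    return v * x

make_var(1.0)    # first call: traces once, creates the variable, works
# make_var(2.0)  # new Python value -> retrace -> raises the ValueError above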
I cannot understand how to use a TensorFlow Dataset as input for my model. I have an X of shape (n_sample, max_sentence_size) and a y of shape (n_sample), but I cannot match the dimensions, and I am not sure what TensorFlow does internally.
Below you can find a reproducible example with zero-filled matrices; my real data is not empty, it is an integer representation of text.
import numpy as np
import tensorflow as tf

X_train = np.zeros((16, 6760))
y_train = np.zeros((16))
train = tf.data.Dataset.from_tensor_slices((X_train, y_train))

# Prepare for tensorflow
BUFFER_SIZE = 10000
BATCH_SIZE = 64
VOCAB_SIZE = 5354

train = train.shuffle(BUFFER_SIZE)#.batch(BATCH_SIZE)

# Select index of interest in text
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=VOCAB_SIZE, output_dim=64, mask_zero=False),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(VOCAB_SIZE, activation='softmax'),
])

model.compile(loss="sparse_categorical_crossentropy",
              # loss=tf.keras.losses.MeanAbsoluteError(),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['sparse_categorical_accuracy'])

history = model.fit(train, epochs=3)
ValueError Traceback (most recent call last)
<ipython-input-74-3a160a5713dd> in <module>
----> 1 history = model.fit(train, epochs=3,
2 # validation_data=test,
3 # validation_steps=30
4 )
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
817 max_queue_size=max_queue_size,
818 workers=workers,
--> 819 use_multiprocessing=use_multiprocessing)
820
821 def evaluate(self,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
340 mode=ModeKeys.TRAIN,
341 training_context=training_context,
--> 342 total_epochs=epochs)
343 cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
344
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
126 step=step, mode=mode, size=current_batch_size) as batch_logs:
127 try:
--> 128 batch_outs = execution_function(iterator)
129 except (StopIteration, errors.OutOfRangeError):
130 # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in execution_function(input_fn)
96 # `numpy` translates Tensors to values in Eager mode.
97 return nest.map_structure(_non_none_constant_value,
---> 98 distributed_function(input_fn))
99
100 return execution_function
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
566 xla_context.Exit()
567 else:
--> 568 result = self._call(*args, **kwds)
569
570 if tracing_count == self._get_tracing_count():
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
613 # This is the first call of __call__, so we have to initialize.
614 initializers = []
--> 615 self._initialize(args, kwds, add_initializers_to=initializers)
616 finally:
617 # At this point we know that the initialization is complete (or less
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
495 self._concrete_stateful_fn = (
496 self._stateful_fn._get_concrete_function_internal_garbage_collected(
# pylint: disable=protected-access
--> 497 *args, **kwds))
498
499 def invalid_creator_scope(*unused_args, **unused_kwds):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args,
**kwargs)
2387 args, kwargs = None, None
2388 with self._lock:
-> 2389 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2390 return graph_function
2391
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2701
2702 self._function_cache.missed.add(call_context_key)
-> 2703 graph_function = self._create_graph_function(args, kwargs)
2704 self._function_cache.primary[cache_key] = graph_function
2705 return graph_function, args, kwargs
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2591 arg_names=arg_names,
2592 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2593 capture_by_value=self._capture_by_value),
2594 self._function_attributes,
2595 # Tell the ConcreteFunction to clean up its graph once it goes out of
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
976 converted_func)
977
--> 978 func_outputs = python_func(*func_args, **func_kwargs)
979
980 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
437 # __wrapped__ allows AutoGraph to swap in a converted function. We give
438 # the function a weak reference to itself to avoid a reference cycle.
--> 439 return weak_wrapped_fn().__wrapped__(*args, **kwds)
440 weak_wrapped_fn = weakref.ref(wrapped_fn)
441
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in distributed_function(input_iterator)
83 args = _prepare_feed_values(model, input_iterator, mode, strategy)
84 outputs = strategy.experimental_run_v2(
---> 85 per_replica_function, args=args)
86 # Out of PerReplica outputs reduce or pick values to return.
87 all_outputs = dist_utils.unwrap_output_dict(
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
761 fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
762 convert_by_default=False)
--> 763 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
764
765 def reduce(self, reduce_op, value, axis):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
1817 kwargs = {}
1818 with self._container_strategy().scope():
-> 1819 return self._call_for_each_replica(fn, args, kwargs)
1820
1821 def _call_for_each_replica(self, fn, args, kwargs):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
2162 self._container_strategy(),
2163 replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
-> 2164 return fn(*args, **kwargs)
2165
2166 def _reduce_to(self, reduce_op, value, destinations):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/autograph/impl/api.py in wrapper(*args, **kwargs)
290 def wrapper(*args, **kwargs):
291 with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292 return func(*args, **kwargs)
293
294 if inspect.isfunction(func) or inspect.ismethod(func):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in train_on_batch(model, x, y, sample_weight, class_weight, reset_metrics, standalone)
431 y,
432 sample_weights=sample_weights,
--> 433 output_loss_metrics=model._output_loss_metrics)
434
435 if reset_metrics:
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in train_on_batch(model, inputs, targets, sample_weights, output_loss_metrics)
310 sample_weights=sample_weights,
311 training=True,
--> 312 output_loss_metrics=output_loss_metrics))
313 if not isinstance(outs, list):
314 outs = [outs]
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _process_single_batch(model, inputs, targets, output_loss_metrics, sample_weights, training)
251 output_loss_metrics=output_loss_metrics,
252 sample_weights=sample_weights,
--> 253 training=training))
254 if total_loss is None:
255 raise ValueError('The model cannot be run '
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _model_loss(model, inputs, targets, output_loss_metrics, sample_weights, training)
165
166 if hasattr(loss_fn, 'reduction'):
--> 167 per_sample_losses = loss_fn.call(targets[i], outs[i])
168 weighted_losses = losses_utils.compute_weighted_loss(
169 per_sample_losses,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/losses.py in call(self, y_true, y_pred)
219 y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
220 y_pred, y_true)
--> 221 return self.fn(y_true, y_pred, **self._fn_kwargs)
222
223 def get_config(self):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/losses.py in sparse_categorical_crossentropy(y_true, y_pred, from_logits, axis)
976 def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
977 return K.sparse_categorical_crossentropy(
--> 978 y_true, y_pred, from_logits=from_logits, axis=axis)
979
980
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/backend.py in sparse_categorical_crossentropy(target, output, from_logits, axis)
4571 with get_graph().as_default():
4572 res = nn.sparse_softmax_cross_entropy_with_logits_v2(
-> 4573 labels=target, logits=output)
4574 else:
4575 res = nn.sparse_softmax_cross_entropy_with_logits_v2(
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py in sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name)
3535 """
3536 return sparse_softmax_cross_entropy_with_logits(
-> 3537 labels=labels, logits=logits, name=name)
3538
3539
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py in sparse_softmax_cross_entropy_with_logits(_sentinel, labels, logits, name)
3451 "should equal the shape of logits except for the last "
3452 "dimension (received %s)." % (labels_static_shape,
-> 3453 logits.get_shape()))
3454 # Check if no reshapes are required.
3455 if logits.get_shape().ndims == 2:
ValueError: Shape mismatch: The shape of labels (received (1,)) should equal the shape of logits except for the last dimension (received (6760, 5354)).
This works for me in TensorFlow 2.0:
import numpy as np
import tensorflow as tf

# Prepare for tensorflow
BUFFER_SIZE = 10000
BATCH_SIZE = 64
VOCAB_SIZE = 5354

X_train = np.zeros((16, 6760))
y_train = np.zeros((16, 1))  # This is changed

train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train = train.shuffle(BUFFER_SIZE).batch(8)  # This is changed

# Select index of interest in text
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=VOCAB_SIZE, output_dim=64, input_length=6760, mask_zero=False),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(VOCAB_SIZE, activation='softmax'),
])
print(model.summary())

model.compile(loss="sparse_categorical_crossentropy",
              # loss=tf.keras.losses.MeanAbsoluteError(),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['sparse_categorical_accuracy'])

history = model.fit(train, epochs=3)
For those with the same problem: I didn't immediately understand rajesh's change. The problem was the absence of a batch dimension.
I replaced:
train = train.shuffle(BUFFER_SIZE) #.batch(BATCH_SIZE)
with (the .batch() call uncommented):
train = train.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
and it worked.
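If it helps anyone hitting the same mismatch: printing the dataset's element_spec before and after batching shows exactly which shapes Keras will receive. A quick sketch using the shapes from the question (the printed specs are abbreviated in the comments):
import numpy as np
import tensorflow as tf

X_train = np.zeros((16, 6760))
y_train = np.zeros((16,))
train = tf.data.Dataset.from_tensor_slices((X_train, y_train))

print(train.element_spec)
# (TensorSpec(shape=(6760,), ...), TensorSpec(shape=(), ...))   <- no batch dimension
print(train.batch(64).element_spec)
# (TensorSpec(shape=(None, 6760), ...), TensorSpec(shape=(None,), ...))   <- what model.fit expects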
I have what would appear to be a pretty straightforward model fitting process in a Jupyter notebook:
model = tf.keras.Sequential([
    feature_layer,
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(7, activation='softmax')
])

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adamax(),
              metrics=['accuracy'])

model.fit(train_ds, validation_data=val_ds,
          epochs=1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='accuracy', patience=10)])
When fitting is complete, I call tf.keras.models.save_model(model) (I have also tried model.save(path)), which saves successfully only if the notebook kernel has been loaded from scratch; otherwise it fails. For example, if I run:
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
del(model)
and then try to execute the first block of code again without resetting the kernel, I will get:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-29-d8fda83a42df> in <module>
----> 1 model.save('test3')
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/network.py in save(self, filepath, overwrite, include_optimizer, save_format, signatures, options)
1006 """
1007 save.save_model(self, filepath, overwrite, include_optimizer, save_format,
-> 1008 signatures, options)
1009
1010 def save_weights(self, filepath, overwrite=True, save_format=None):
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/saving/save.py in save_model(model, filepath, overwrite, include_optimizer, save_format, signatures, options)
113 else:
114 saved_model_save.save(model, filepath, overwrite, include_optimizer,
--> 115 signatures, options)
116
117
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/saving/saved_model/save.py in save(model, filepath, overwrite, include_optimizer, signatures, options)
76 # we use the default replica context here.
77 with distribution_strategy_context._get_default_replica_context(): # pylint: disable=protected-access
---> 78 save_lib.save(model, filepath, signatures, options)
79
80 if not include_optimizer:
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/saved_model/save.py in save(obj, export_dir, signatures, options)
897 # Note we run this twice since, while constructing the view the first time
898 # there can be side effects of creating variables.
--> 899 _ = _SaveableView(checkpoint_graph_view)
900 saveable_view = _SaveableView(checkpoint_graph_view)
901
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/saved_model/save.py in __init__(self, checkpoint_view)
185 # variables on first run.
186 concrete_functions = (
--> 187 function._list_all_concrete_functions_for_serialization()) # pylint: disable=protected-access
188 else:
189 concrete_functions = [function]
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in _list_all_concrete_functions_for_serialization(self)
797 """
798 if self.input_signature is not None:
--> 799 self.get_concrete_function()
800 concrete_functions = []
801 # pylint: disable=protected-access
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in get_concrete_function(self, *args, **kwargs)
907 if self._stateful_fn is None:
908 initializers = []
--> 909 self._initialize(args, kwargs, add_initializers_to=initializers)
910 self._initialize_uninitialized_variables(initializers)
911
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
495 self._concrete_stateful_fn = (
496 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 497 *args, **kwds))
498
499 def invalid_creator_scope(*unused_args, **unused_kwds):
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2387 args, kwargs = None, None
2388 with self._lock:
-> 2389 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2390 return graph_function
2391
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2701
2702 self._function_cache.missed.add(call_context_key)
-> 2703 graph_function = self._create_graph_function(args, kwargs)
2704 self._function_cache.primary[cache_key] = graph_function
2705 return graph_function, args, kwargs
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2591 arg_names=arg_names,
2592 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2593 capture_by_value=self._capture_by_value),
2594 self._function_attributes,
2595 # Tell the ConcreteFunction to clean up its graph once it goes out of
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
976 converted_func)
977
--> 978 func_outputs = python_func(*func_args, **func_kwargs)
979
980 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
437 # __wrapped__ allows AutoGraph to swap in a converted function. We give
438 # the function a weak reference to itself to avoid a reference cycle.
--> 439 return weak_wrapped_fn().__wrapped__(*args, **kwds)
440 weak_wrapped_fn = weakref.ref(wrapped_fn)
441
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/training/tracking/tracking.py in _initializer()
242 #def_function.function(input_signature=[], autograph=False)
243 def _initializer():
--> 244 self._initialize()
245 return 1 # Dummy return
246
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/lookup_ops.py in _initialize(self)
179
180 def _initialize(self):
--> 181 return self._initializer.initialize(self)
182
183 #property
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/lookup_ops.py in initialize(self, table)
462 self._name, values=(table.resource_handle, self._keys, self._values)):
463 init_op = gen_lookup_ops.lookup_table_import_v2(table.resource_handle,
--> 464 self._keys, self._values)
465 ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
466 return init_op
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/gen_lookup_ops.py in lookup_table_import_v2(table_handle, keys, values, name)
700 _, _, _op, _outputs = _op_def_library._apply_op_helper(
701 "LookupTableImportV2", table_handle=table_handle, keys=keys,
--> 702 values=values, name=name)
703 return _op
704 LookupTableImportV2 = tf_export("raw_ops.LookupTableImportV2")(_ops.to_raw_op(lookup_table_import_v2))
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/op_def_library.py in _apply_op_helper(op_type_name, name, **keywords)
740 op = g._create_op_internal(op_type_name, inputs, dtypes=None,
741 name=scope, input_types=input_types,
--> 742 attrs=attr_protos, op_def=op_def)
743
744 # `outputs` is returned as a separate return value so that the output
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/func_graph.py in _create_op_internal(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_device)
589 if ctxt is not None and hasattr(ctxt, "AddValue"):
590 inp = ctxt.AddValue(inp)
--> 591 inp = self.capture(inp)
592 inputs[i] = inp
593 return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/func_graph.py in capture(self, tensor, name, shape)
628 # Large EagerTensors and resources are captured with Placeholder ops
629 return self._capture_helper(tensor, name, shape)
--> 630 if tensor.graph is not self:
631 if name is None:
632 name = tensor.op.name
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/ops.py in graph(self)
414 def graph(self):
415 """The `Graph` that contains this tensor."""
--> 416 return self._op.graph
417
418 #property
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/ops.py in graph(self)
2223 def graph(self):
2224 """The `Graph` that contains this operation."""
-> 2225 return self._graph
2226
2227 #property
AttributeError: 'Operation' object has no attribute '_graph'
Any help would be appreciated.
I have recreated your code and successfully run it without any errors. Kindly use the latest stable release of TensorFlow, in this case 2.1.0.
If the problem still persists, try updating your Jupyter Notebook and Python (to the latest supported versions), and/or try reproducing it first in Google Colab to ensure that this is not related to any hardware issues.
Screenshot of successful execution:
Here is my code:
import tensorflow as tf
print('TensorFlow Version: {}'.format(tf.__version__))

# Synthetic Data
x, y = tf.random.uniform(shape=(1000, 7, 1), maxval=1), tf.random.uniform(shape=(1000, 7, 1), maxval=1)

# Model
feature_layer = tf.keras.layers.Input(shape=(7, 1))
model = tf.keras.models.Sequential([
    feature_layer,
    tf.keras.layers.Dense(128, activation='relu', input_shape=(7, 1)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(7, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adamax())
model.fit(x, y, epochs=1)

# 1st Save
first_model_loc = '/tmp/tf_keras_1st_run/'
tf.keras.models.save_model(model, filepath=first_model_loc)
print('\nSuccessfully saved at {}\n\n'.format(first_model_loc))  # Progress Checking

#
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
del(model)
#

# Model
feature_layer = tf.keras.layers.Input(shape=(7, 1))
model = tf.keras.models.Sequential([
    feature_layer,
    tf.keras.layers.Dense(128, activation='relu', input_shape=(7, 1)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(7, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adamax())
model.fit(x, y, epochs=1)

# 2nd Save
second_model_loc = '/tmp/tf_keras_2nd_run/'
tf.keras.models.save_model(model, filepath=second_model_loc)
print('\nSuccessfully saved at {}'.format(second_model_loc))  # Progress Checking
I ran into a problem when trying to use TensorFlow 2.0 to create a transformer based on the official guidelines posted by TensorFlow. When I add a fully connected net, it seems that both the classification loss and the translation loss have gradients with respect to some of the variables.
But once I try to add the two losses together, the gradients for all variables disappear. I have no idea why, and I have been trying to solve this problem for weeks. Could anyone give me some suggestions?
@tf.function(input_signature=train_step_signature)
def train_step(group, inp, tar, label):
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]  # sess=tf.compat.v1.Session()

    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)

    with tf.GradientTape(persistent=True) as tape:
        classfication, predictions, _ = transformer(inp, tar_inp,
                                                    True,
                                                    enc_padding_mask,
                                                    combined_mask,
                                                    dec_padding_mask)
        loss = loss_function(tar_real, predictions)
        loss2 = tf.nn.softmax_cross_entropy_with_logits(label, classfication)
        # print(loss, loss2)

    a = tape.gradient(loss, transformer.trainable_variables)
    gradients = tape.gradient(loss + loss2, transformer.trainable_variables)
    optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))

    class_loss(loss2)
    train_loss(loss)
    train_accuracy(tar_real, predictions)
Below is my error information:
ValueError Traceback (most recent call last)
<ipython-input-2-81054f0385cb> in <module>()
999 # inp -> portuguese, tar -> english
1000 for (batch, (group, inp, tar, label)) in enumerate(train_dataset):
-> 1001 train_step(group, inp, tar, label)
1002 if batch % 50 == 0:
1003 print(
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
455
456 tracing_count = self._get_tracing_count()
--> 457 result = self._call(*args, **kwds)
458 if tracing_count == self._get_tracing_count():
459 self._call_counter.called_without_tracing()
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
501 # This is the first call of __call__, so we have to initialize.
502 initializer_map = object_identity.ObjectIdentityDictionary()
--> 503 self._initialize(args, kwds, add_initializers_to=initializer_map)
504 finally:
505 # At this point we know that the initialization is complete (or less
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
406 self._concrete_stateful_fn = (
407 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 408 *args, **kwds))
409
410 def invalid_creator_scope(*unused_args, **unused_kwds):
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1846 if self.input_signature:
1847 args, kwargs = None, None
-> 1848 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1849 return graph_function
1850
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2148 graph_function = self._function_cache.primary.get(cache_key, None)
2149 if graph_function is None:
-> 2150 graph_function = self._create_graph_function(args, kwargs)
2151 self._function_cache.primary[cache_key] = graph_function
2152 return graph_function, args, kwargs
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2039 arg_names=arg_names,
2040 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041 capture_by_value=self._capture_by_value),
2042 self._function_attributes,
2043 # Tell the ConcreteFunction to clean up its graph once it goes out of
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
913 converted_func)
914
--> 915 func_outputs = python_func(*func_args, **func_kwargs)
916
917 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
356 # __wrapped__ allows AutoGraph to swap in a converted function. We give
357 # the function a weak reference to itself to avoid a reference cycle.
--> 358 return weak_wrapped_fn().__wrapped__(*args, **kwds)
359 weak_wrapped_fn = weakref.ref(wrapped_fn)
360
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/func_graph.py in wrapper(*args, **kwargs)
903 except Exception as e: # pylint:disable=broad-except
904 if hasattr(e, "ag_error_metadata"):
--> 905 raise e.ag_error_metadata.to_exception(e)
906 else:
907 raise
ValueError: in converted code:
<ipython-input-1-81054f0385cb>:856 train_step *
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py:427 apply_gradients
grads_and_vars = _filter_grads(grads_and_vars)
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py:1025 _filter_grads
([v.name for _, v in grads_and_vars],))
ValueError: No gradients provided for any variable: ['transformer_1/encoder_1/embedding_2/embeddings:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_98/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_98/bias:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_99/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_99/bias:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_100/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_100/bias:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_101/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_101/bias:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_102/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_102/bias:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_103/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_103/bias:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_30/gamma:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_30/beta:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_31/gamma:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_31/beta:0', 'transformer_1/encoder_1/encoder_layer_7/multi_head_attention_19/dense_104/kernel:0', 'transformer_1/encoder_1/encoder...
Yup, this is a mildly annoying thing about GradientTape. You cannot do anything to the tensors outside the tape context (the with block), or the tape will "lose track" of them. You can fix it by simply moving the addition into the context:
with tf.GradientTape(persistent=True) as tape:
    classfication, predictions, _ = transformer(inp, tar_inp,
                                                True,
                                                enc_padding_mask,
                                                combined_mask,
                                                dec_padding_mask)
    loss = loss_function(tar_real, predictions)
    loss2 = tf.nn.softmax_cross_entropy_with_logits(label, classfication)
    added_loss = loss + loss2
    # print(loss, loss2)

a = tape.gradient(loss, transformer.trainable_variables)
gradients = tape.gradient(added_loss, transformer.trainable_variables)
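Since the tape is persistent anyway, another option (just a sketch, reusing the variables from the snippet above; it should be equivalent here) is to pass both losses as the target: tape.gradient accepts a list of targets and sums their gradients, so the addition never has to happen outside the tape at all.
# Gradients of loss and loss2 are computed and summed per variable.
gradients = tape.gradient([loss, loss2], transformer.trainable_variables)
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))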