I ran into a problem when I tried to use TensorFlow 2.0 to build a Transformer based on the official TensorFlow tutorial. When I add a fully connected classification head, both the classification loss and the translation loss produce gradients for some of the variables.
But as soon as I add the two losses together, the gradients for all variables disappear. I have been trying to solve this for weeks with no luck. Could anyone give me some suggestions?
@tf.function(input_signature=train_step_signature)
def train_step(group, inp, tar, label):
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]
    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
    with tf.GradientTape(persistent=True) as tape:
        classification, predictions, _ = transformer(inp, tar_inp,
                                                     True,
                                                     enc_padding_mask,
                                                     combined_mask,
                                                     dec_padding_mask)
        loss = loss_function(tar_real, predictions)
        loss2 = tf.nn.softmax_cross_entropy_with_logits(label, classification)
    # print(loss, loss2)
    a = tape.gradient(loss, transformer.trainable_variables)
    gradients = tape.gradient(loss + loss2, transformer.trainable_variables)
    optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
    class_loss(loss2)
    train_loss(loss)
    train_accuracy(tar_real, predictions)
Below is my error information:
ValueError Traceback (most recent call last)
<ipython-input-2-81054f0385cb> in <module>()
999 # inp -> portuguese, tar -> english
1000 for (batch, (group, inp, tar, label)) in enumerate(train_dataset):
-> 1001 train_step(group, inp, tar, label)
1002 if batch % 50 == 0:
1003 print(
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
455
456 tracing_count = self._get_tracing_count()
--> 457 result = self._call(*args, **kwds)
458 if tracing_count == self._get_tracing_count():
459 self._call_counter.called_without_tracing()
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
501 # This is the first call of __call__, so we have to initialize.
502 initializer_map = object_identity.ObjectIdentityDictionary()
--> 503 self._initialize(args, kwds, add_initializers_to=initializer_map)
504 finally:
505 # At this point we know that the initialization is complete (or less
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
406 self._concrete_stateful_fn = (
407 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 408 *args, **kwds))
409
410 def invalid_creator_scope(*unused_args, **unused_kwds):
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1846 if self.input_signature:
1847 args, kwargs = None, None
-> 1848 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1849 return graph_function
1850
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2148 graph_function = self._function_cache.primary.get(cache_key, None)
2149 if graph_function is None:
-> 2150 graph_function = self._create_graph_function(args, kwargs)
2151 self._function_cache.primary[cache_key] = graph_function
2152 return graph_function, args, kwargs
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2039 arg_names=arg_names,
2040 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041 capture_by_value=self._capture_by_value),
2042 self._function_attributes,
2043 # Tell the ConcreteFunction to clean up its graph once it goes out of
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
913 converted_func)
914
--> 915 func_outputs = python_func(*func_args, **func_kwargs)
916
917 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
356 # __wrapped__ allows AutoGraph to swap in a converted function. We give
357 # the function a weak reference to itself to avoid a reference cycle.
--> 358 return weak_wrapped_fn().__wrapped__(*args, **kwds)
359 weak_wrapped_fn = weakref.ref(wrapped_fn)
360
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/func_graph.py in wrapper(*args, **kwargs)
903 except Exception as e: # pylint:disable=broad-except
904 if hasattr(e, "ag_error_metadata"):
--> 905 raise e.ag_error_metadata.to_exception(e)
906 else:
907 raise
ValueError: in converted code:
<ipython-input-1-81054f0385cb>:856 train_step *
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py:427 apply_gradients
grads_and_vars = _filter_grads(grads_and_vars)
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py:1025 _filter_grads
([v.name for _, v in grads_and_vars],))
ValueError: No gradients provided for any variable: ['transformer_1/encoder_1/embedding_2/embeddings:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_98/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_98/bias:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_99/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_99/bias:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_100/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_100/bias:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_101/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/multi_head_attention_18/dense_101/bias:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_102/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_102/bias:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_103/kernel:0', 'transformer_1/encoder_1/encoder_layer_6/sequential_12/dense_103/bias:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_30/gamma:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_30/beta:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_31/gamma:0', 'transformer_1/encoder_1/encoder_layer_6/layer_normalization_31/beta:0', 'transformer_1/encoder_1/encoder_layer_7/multi_head_attention_19/dense_104/kernel:0', 'transformer_1/encoder_1/encoder...
Yup, this is a mildly annoying thing about GradientTape. You cannot do anything to the tensors outside the tape context (the with block) or the tape will "lose track" of them. You can fix it by simply moving the addition into the context:
with tf.GradientTape(persistent=True) as tape:
    classification, predictions, _ = transformer(inp, tar_inp,
                                                 True,
                                                 enc_padding_mask,
                                                 combined_mask,
                                                 dec_padding_mask)
    loss = loss_function(tar_real, predictions)
    loss2 = tf.nn.softmax_cross_entropy_with_logits(label, classification)
    added_loss = loss + loss2
# print(loss, loss2)
a = tape.gradient(loss, transformer.trainable_variables)
gradients = tape.gradient(added_loss, transformer.trainable_variables)
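The rule is easy to verify in isolation. Here is a minimal, self-contained sketch (a toy example, separate from the Transformer code above) showing that a tensor computed after the with block exits is invisible to the tape, which is exactly what produces the "No gradients provided" error:

import tensorflow as tf

x = tf.Variable(2.0)
with tf.GradientTape(persistent=True) as tape:
    a = x * x       # recorded by the tape
    b = 3.0 * x     # recorded by the tape
    inside = a + b  # addition recorded: differentiable

outside = a + b     # addition NOT recorded: the tape has stopped watching

print(tape.gradient(inside, x))   # tf.Tensor(7.0, ...)
print(tape.gradient(outside, x))  # None -> optimizer raises "No gradients provided"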
I was trying to see if I could detect the Higgs boson using transfer learning, and I am unable to understand the error message.
I was wondering if it has something to do with the fact that the mentioned model was designed for computer vision, so it will only work on images (which I don't think is the case, but any input is appreciated).
Here's the code and error message:
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub

m = hub.KerasLayer('https://tfhub.dev/google/on_device_vision/classifier/landmarks_classifier_oceania_antarctica_V1/1')
m = tf.keras.Sequential([
    m,
    tf.keras.layers.Dense(2, activation='softmax'),
])
m.compile(loss='binary_crossentropy',
          optimizer='adam', metrics=['accuracy', 'binary_accuracy'])
history = m.fit(ds_train, validation_data=ds_valid, epochs=12, steps_per_epoch=13)
Error:
ValueError Traceback (most recent call last)
<ipython-input-20-0c5a3b4a3d55> in <module>
11 m.compile(loss = 'binary_crossentropy',
12 optimizer = 'adam', metrics = ['accuracy','binary_accuracy'])
---> 13 history = m.fit(ds_train,validation_data=ds_valid, epochs =12 ,steps_per_epoch=13)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
504 self._concrete_stateful_fn = (
505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 506 *args, **kwds))
507
508 def invalid_creator_scope(*unused_args, **unused_kwds):
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2665 arg_names=arg_names,
2666 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2667 capture_by_value=self._capture_by_value),
2668 self._function_attributes,
2669 # Tell the ConcreteFunction to clean up its graph once it goes out of
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/opt/conda/lib/python3.7/site-packages/tensorflow_hub/keras_layer.py:222 call *
result = f()
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py:1605 __call__ **
return self._call_impl(args, kwargs)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py:1645 _call_impl
return self._call_flat(args, self.captured_inputs, cancellation_manager)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py:1730 _call_flat
arg.shape))
ValueError: The argument 'images' (value Tensor("IteratorGetNext:0", shape=(None, 28), dtype=float32, device=/job:worker/replica:0/task:0/device:CPU:0)) is not compatible with the shape this function was traced with. Expected shape (None, 321, 321, 3), but got shape (None, 28).
If you called get_concrete_function, you may need to pass a tf.TensorSpec(..., shape=...) with a less specific shape, having None on axes which can vary.
Any help is appreciated. Thanks a lot.
As per the official documentation of the Landmarks Classifier:
Inputs are expected to be 3-channel RGB color images of size 321 x 321, scaled to [0, 1].
But your dataset's file format is TFRecord.
When we use transfer learning and want to reuse models, either from TF Hub or from tf.keras.applications, our data must be in the predefined format mentioned in the documentation.
So please ensure that your dataset consists of images, and resize the image arrays to (321, 321, 3) for the TF Hub module to work.
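As a rough illustration, a preprocessing step along these lines can be mapped over the dataset before fitting. This is only a sketch: it assumes ds_train already yields decoded (image, label) pairs, and the actual parsing step for your TFRecords will differ:

import tensorflow as tf

def preprocess(image, label):
    image = tf.image.convert_image_dtype(image, tf.float32)  # scale pixels to [0, 1]
    image = tf.image.resize(image, (321, 321))               # size the module expects
    return image, label

ds_train = ds_train.map(preprocess).batch(32)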
I am trying to create an RNN that can generate Shakespeare-like text. However, I keep getting an error about the rank of my input matrices/datasets. I have tried squeezing the matrices, changing the dimensions, etc., but I cannot figure out why it is not working.
Here is my code:
# Get Shakespeare text
import tensorflow as tf

shakespeare_url = "https://homl.info/shakespeare"
filepath = tf.keras.utils.get_file('shakespeare.txt', shakespeare_url)
with open(filepath) as f:
    shakespeare_text = f.read()

# Tokenizing Shakespeare text
tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)
tokenizer.fit_on_texts(shakespeare_text)
max_id = len(tokenizer.word_index)

import numpy as np
encoded = np.array(tokenizer.texts_to_sequences(shakespeare_text)) - 1
dataset_size = tokenizer.document_count
train_size = dataset_size * 90 // 100
dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])

# Put dataset object into windows and batches
n_steps = 100
window_length = n_steps + 1
dataset = dataset.window(window_length, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_length))
batch_size = 32
dataset = dataset.shuffle(10000).batch(batch_size)
dataset = dataset.map(lambda window: (window[:, :-1], window[:, 1:]))
dataset = dataset.map(lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))
new_dataset = dataset.map(lambda X_batch, Y_batch: (tf.squeeze(X_batch), tf.squeeze(Y_batch)))
dataset = dataset.prefetch(1)
new_dataset = new_dataset.prefetch(1)

# Make model
model = tf.keras.models.Sequential([
    #tf.keras.layers.Flatten(input_shape=[batch_size, None, max_id]),
    tf.keras.layers.GRU(128, return_sequences=True, stateful=True, dropout=0.2, recurrent_dropout=0.2,
                        batch_input_shape=[batch_size, None, max_id], name='gru_1'),
    tf.keras.layers.GRU(128, return_sequences=True, stateful=True, dropout=0.2, recurrent_dropout=0.2, name='gru_2'),
    #batch_input_shape=[batch_size, 100, max_id]),
    tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(max_id, activation='softmax', name='time_layer'))
])

class ResetStatesCallback(tf.keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs):
        self.model.reset_states()

model.compile(loss='sparse_categorical_crossentropy', optimizer="adam")
model.fit(new_dataset, epochs=50, callbacks=[ResetStatesCallback()])
I have tried passing both dataset and new_dataset, without success.
The error is below:
Epoch 1/50
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-329-28b67720fd03> in <module>
1 model.compile(loss='sparse_categorical_crossentropy', optimizer="adam")
----> 2 model.fit(new_dataset, epochs=50, callbacks=[ResetStatesCallback()])
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
695 self._concrete_stateful_fn = (
696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 697 *args, **kwds))
698
699 def invalid_creator_scope(*unused_args, **unused_kwds):
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3073 arg_names=arg_names,
3074 override_flat_arg_shapes=override_flat_arg_shapes,
-> 3075 capture_by_value=self._capture_by_value),
3076 self._function_attributes,
3077 function_spec=self.function_spec,
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:806 train_function *
return step_function(self, iterator)
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:789 run_step **
outputs = model.train_step(data)
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:747 train_step
y_pred = self(x, training=True)
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py:976 __call__
self.name)
/Users/dariyankhan/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/input_spec.py:168 assert_input_compatibility
layer_name + ' is incompatible with the layer: '
ValueError: Input 0 of layer sequential_34 is incompatible with the layer: its rank is undefined, but the layer requires a defined rank.
For a specific problem in reinforcement learning (inspired by this paper), I'm using an RNN that is fed data of shape (batch_size, time_steps, features) = (1, 1, 1) for L data points, after which a "cycle" is over; it uses an LSTM cell. I'm using lstm.stateful = True, and after L feeds to the network I call lstm.reset_states().
Between one cycle and the next, just after calling lstm.reset_states(), I'd like to evaluate the output of the network on input data of shape (batch_size, time_steps, features) = (L, 1, 1), and then continue using the RNN with batch_size = 1 again.
Furthermore, I want the code to be as optimized as possible, so I'm trying to use AutoGraph via the @tf.function decorator.
The problem is that I encounter an error, which can be recreated with the following example (notice that if @tf.function is removed everything works, and I don't understand why):
import tensorflow as tf
import numpy as np

class Actor(tf.keras.Model):
    def __init__(self):
        super(Actor, self).__init__()
        self.lstm = tf.keras.layers.LSTM(5, return_sequences=True, stateful=True, input_shape=(None, None, 1))

    def call(self, inputs):
        feat = self.lstm(inputs)
        return feat

actor = Actor()

@tf.function
def g(actor):
    context1 = tf.reshape(np.array([0.] * 10), (10, 1, 1))
    actor(context1)
    actor.reset_states()
    actor.lstm.stateful = False
    context = tf.reshape(np.array([0.]), (1, 1, 1))
    actor(context)

g(actor)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-28-4487772bee64> in <module>
23 actor(context)
24
---> 25 g(actor)
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
504 self._concrete_stateful_fn = (
505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 506 *args, **kwds))
507
508 def invalid_creator_scope(*unused_args, **unused_kwds):
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2665 arg_names=arg_names,
2666 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2667 capture_by_value=self._capture_by_value),
2668 self._function_attributes,
2669 # Tell the ConcreteFunction to clean up its graph once it goes out of
~/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
<ipython-input-28-4487772bee64>:23 g *
actor(context)
<ipython-input-28-4487772bee64>:11 call *
feat= self.lstm(inputs)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/layers/recurrent.py:654 __call__ **
return super(RNN, self).__call__(inputs, **kwargs)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py:886 __call__
self.name)
/home/cooper-cooper/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/input_spec.py:227 assert_input_compatibility
', found shape=' + str(shape))
ValueError: Input 0 is incompatible with layer lstm_7: expected shape=(10, None, 1), found shape=[1, 1, 1]
In case anyone is interested: I found an answer from the following posts, and the workaround that worked for me was the following:
import tensorflow as tf
import numpy as np

class Actor(tf.keras.Model):
    def __init__(self):
        super(Actor, self).__init__()
        self.lstm = tf.keras.layers.LSTM(5, return_sequences=True, stateful=True, input_shape=(1, 1))

    def call(self, inputs):
        feat = self.lstm(inputs)
        return feat

    def reset_states_workaround(self, new_batch_size=1):
        self.lstm.states = [tf.Variable(tf.random.uniform((new_batch_size, 5))),
                            tf.Variable(tf.random.uniform((new_batch_size, 5)))]
        self.lstm.input_spec = [tf.keras.layers.InputSpec(shape=(new_batch_size, None, 1), ndim=3)]
Then, between two different @tf.function-decorated calls, I do:
actor = Actor()

@tf.function
def g(actor):
    context1 = tf.reshape(np.array([0.] * 10), (10, 1, 1))
    preds = actor(context1)
    return preds

g(actor)
actor.reset_states_workaround(new_batch_size=1)

@tf.function
def g2(actor):
    context1 = tf.reshape(np.array([0.] * 1), (1, 1, 1))
    preds = actor(context1)
    return preds

g2(actor)
One problem arises, though, when using actor.reset_states_workaround(new_batch_size=1) inside a @tf.function: ValueError: tf.function-decorated function tried to create variables on non-first call. That's why I'm calling it outside.
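That last restriction is easy to reproduce on its own. Here is a minimal sketch (a hypothetical toy function, unrelated to the Actor above): tf.Variable creation is only allowed during the first trace of a tf.function, and passing plain Python scalars forces a retrace per distinct value, which triggers the error on the second call:

import tensorflow as tf

@tf.function
def f(x):
    v = tf.Variable(1.0)  # variable creation inside the traced function
    return v * x

f(1.0)  # first trace: creating the variable is allowed
f(2.0)  # new Python value -> retrace -> ValueError: ... non-first call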
I cannot understand how to use a TensorFlow Dataset as input for my model. I have X of shape (n_samples, max_sentence_size) and y of shape (n_samples), but I cannot match the dimensions, and I am not sure what TensorFlow does internally.
Below is a reproducible example with zero-filled matrices; my real data is not empty, it is an integer representation of text.
import numpy as np
import tensorflow as tf

X_train = np.zeros((16, 6760))
y_train = np.zeros((16))
train = tf.data.Dataset.from_tensor_slices((X_train, y_train))

# Prepare for tensorflow
BUFFER_SIZE = 10000
BATCH_SIZE = 64
VOCAB_SIZE = 5354

train = train.shuffle(BUFFER_SIZE)  # .batch(BATCH_SIZE)

# Select index of interest in text
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=VOCAB_SIZE, output_dim=64, mask_zero=False),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(VOCAB_SIZE, activation='softmax'),
])

model.compile(loss="sparse_categorical_crossentropy",
              # loss=tf.keras.losses.MeanAbsoluteError(),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['sparse_categorical_accuracy'])

history = model.fit(train, epochs=3)
ValueError Traceback (most recent call last)
<ipython-input-74-3a160a5713dd> in <module>
----> 1 history = model.fit(train, epochs=3,
2 # validation_data=test,
3 # validation_steps=30
4 )
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
817 max_queue_size=max_queue_size,
818 workers=workers,
--> 819 use_multiprocessing=use_multiprocessing)
820
821 def evaluate(self,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
340 mode=ModeKeys.TRAIN,
341 training_context=training_context,
--> 342 total_epochs=epochs)
343 cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
344
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
126 step=step, mode=mode, size=current_batch_size) as batch_logs:
127 try:
--> 128 batch_outs = execution_function(iterator)
129 except (StopIteration, errors.OutOfRangeError):
130 # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in execution_function(input_fn)
96 # `numpy` translates Tensors to values in Eager mode.
97 return nest.map_structure(_non_none_constant_value,
---> 98 distributed_function(input_fn))
99
100 return execution_function
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
566 xla_context.Exit()
567 else:
--> 568 result = self._call(*args, **kwds)
569
570 if tracing_count == self._get_tracing_count():
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
613 # This is the first call of __call__, so we have to initialize.
614 initializers = []
--> 615 self._initialize(args, kwds, add_initializers_to=initializers)
616 finally:
617 # At this point we know that the initialization is complete (or less
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
495 self._concrete_stateful_fn = (
496 self._stateful_fn._get_concrete_function_internal_garbage_collected(
# pylint: disable=protected-access
--> 497 *args, **kwds))
498
499 def invalid_creator_scope(*unused_args, **unused_kwds):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args,
**kwargs)
2387 args, kwargs = None, None
2388 with self._lock:
-> 2389 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2390 return graph_function
2391
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2701
2702 self._function_cache.missed.add(call_context_key)
-> 2703 graph_function = self._create_graph_function(args, kwargs)
2704 self._function_cache.primary[cache_key] = graph_function
2705 return graph_function, args, kwargs
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2591 arg_names=arg_names,
2592 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2593 capture_by_value=self._capture_by_value),
2594 self._function_attributes,
2595 # Tell the ConcreteFunction to clean up its graph once it goes out of
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
976 converted_func)
977
--> 978 func_outputs = python_func(*func_args, **func_kwargs)
979
980 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
437 # __wrapped__ allows AutoGraph to swap in a converted function. We give
438 # the function a weak reference to itself to avoid a reference cycle.
--> 439 return weak_wrapped_fn().__wrapped__(*args, **kwds)
440 weak_wrapped_fn = weakref.ref(wrapped_fn)
441
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in distributed_function(input_iterator)
83 args = _prepare_feed_values(model, input_iterator, mode, strategy)
84 outputs = strategy.experimental_run_v2(
---> 85 per_replica_function, args=args)
86 # Out of PerReplica outputs reduce or pick values to return.
87 all_outputs = dist_utils.unwrap_output_dict(
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
761 fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
762 convert_by_default=False)
--> 763 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
764
765 def reduce(self, reduce_op, value, axis):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
1817 kwargs = {}
1818 with self._container_strategy().scope():
-> 1819 return self._call_for_each_replica(fn, args, kwargs)
1820
1821 def _call_for_each_replica(self, fn, args, kwargs):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/distribute/distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
2162 self._container_strategy(),
2163 replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
-> 2164 return fn(*args, **kwargs)
2165
2166 def _reduce_to(self, reduce_op, value, destinations):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/autograph/impl/api.py in wrapper(*args, **kwargs)
290 def wrapper(*args, **kwargs):
291 with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292 return func(*args, **kwargs)
293
294 if inspect.isfunction(func) or inspect.ismethod(func):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in train_on_batch(model, x, y, sample_weight, class_weight, reset_metrics, standalone)
431 y,
432 sample_weights=sample_weights,
--> 433 output_loss_metrics=model._output_loss_metrics)
434
435 if reset_metrics:
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in train_on_batch(model, inputs, targets, sample_weights, output_loss_metrics)
310 sample_weights=sample_weights,
311 training=True,
--> 312 output_loss_metrics=output_loss_metrics))
313 if not isinstance(outs, list):
314 outs = [outs]
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _process_single_batch(model, inputs, targets, output_loss_metrics, sample_weights, training)
251 output_loss_metrics=output_loss_metrics,
252 sample_weights=sample_weights,
--> 253 training=training))
254 if total_loss is None:
255 raise ValueError('The model cannot be run '
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _model_loss(model, inputs, targets, output_loss_metrics, sample_weights, training)
165
166 if hasattr(loss_fn, 'reduction'):
--> 167 per_sample_losses = loss_fn.call(targets[i], outs[i])
168 weighted_losses = losses_utils.compute_weighted_loss(
169 per_sample_losses,
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/losses.py in call(self, y_true, y_pred)
219 y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
220 y_pred, y_true)
--> 221 return self.fn(y_true, y_pred, **self._fn_kwargs)
222
223 def get_config(self):
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/losses.py in sparse_categorical_crossentropy(y_true, y_pred, from_logits, axis)
976 def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
977 return K.sparse_categorical_crossentropy(
--> 978 y_true, y_pred, from_logits=from_logits, axis=axis)
979
980
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/keras/backend.py in sparse_categorical_crossentropy(target, output, from_logits, axis)
4571 with get_graph().as_default():
4572 res = nn.sparse_softmax_cross_entropy_with_logits_v2(
-> 4573 labels=target, logits=output)
4574 else:
4575 res = nn.sparse_softmax_cross_entropy_with_logits_v2(
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py in sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name)
3535 """
3536 return sparse_softmax_cross_entropy_with_logits(
-> 3537 labels=labels, logits=logits, name=name)
3538
3539
/opt/conda/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py in sparse_softmax_cross_entropy_with_logits(_sentinel, labels, logits, name)
3451 "should equal the shape of logits except for the last "
3452 "dimension (received %s)." % (labels_static_shape,
-> 3453 logits.get_shape()))
3454 # Check if no reshapes are required.
3455 if logits.get_shape().ndims == 2:
ValueError: Shape mismatch: The shape of labels (received (1,)) should equal the shape of logits except for the last dimension (received (6760, 5354)).
This works for me in TensorFlow 2.0:
import numpy as np
import tensorflow as tf

# Prepare for tensorflow
BUFFER_SIZE = 10000
BATCH_SIZE = 64
VOCAB_SIZE = 5354

X_train = np.zeros((16, 6760))
y_train = np.zeros((16, 1))  # This is changed
train = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train = train.shuffle(BUFFER_SIZE).batch(8)  # This is changed

# Select index of interest in text
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=VOCAB_SIZE, output_dim=64, input_length=6760, mask_zero=False),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(VOCAB_SIZE, activation='softmax'),
])
print(model.summary())

model.compile(loss="sparse_categorical_crossentropy",
              # loss=tf.keras.losses.MeanAbsoluteError(),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['sparse_categorical_accuracy'])

history = model.fit(train, epochs=3)
For those with the same problem: I didn't immediately understand rajesh's change. The problem was the absence of a batch dimension.
I replaced:
train = train.shuffle(BUFFER_SIZE)  # .batch(BATCH_SIZE)
with (uncommenting the .batch call):
train = train.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
and it worked.
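To see why the batch dimension matters: when a tf.data.Dataset is passed to model.fit, Keras treats each dataset element as one batch, so an unbatched dataset feeds single examples with no batch axis. A small sketch, using the same toy shapes as above:

import numpy as np
import tensorflow as tf

X = np.zeros((16, 6760))
y = np.zeros((16, 1))
ds = tf.data.Dataset.from_tensor_slices((X, y))

print(next(iter(ds))[0].shape)           # (6760,)  -- one example, no batch axis
print(next(iter(ds.batch(8)))[0].shape)  # (8, 6760) -- what model.fit expects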
I have what would appear to be a pretty straightforward model fitting process in a Jupyter notebook:
model = tf.keras.Sequential([
    feature_layer,
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(7, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adamax(),
              metrics=['accuracy'])
model.fit(train_ds, validation_data=val_ds,
          epochs=1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='accuracy', patience=10)])
When fitting is complete, I call tf.keras.models.save_model(model) (I have also tried model.save(path)), which succeeds only if the notebook kernel has been loaded from scratch; otherwise it fails. For example, if I run:
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
del model
and then try to execute the first block of code again without resetting the kernel, I get:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-29-d8fda83a42df> in <module>
----> 1 model.save('test3')
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/engine/network.py in save(self, filepath, overwrite, include_optimizer, save_format, signatures, options)
1006 """
1007 save.save_model(self, filepath, overwrite, include_optimizer, save_format,
-> 1008 signatures, options)
1009
1010 def save_weights(self, filepath, overwrite=True, save_format=None):
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/saving/save.py in save_model(model, filepath, overwrite, include_optimizer, save_format, signatures, options)
113 else:
114 saved_model_save.save(model, filepath, overwrite, include_optimizer,
--> 115 signatures, options)
116
117
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/keras/saving/saved_model/save.py in save(model, filepath, overwrite, include_optimizer, signatures, options)
76 # we use the default replica context here.
77 with distribution_strategy_context._get_default_replica_context(): # pylint: disable=protected-access
---> 78 save_lib.save(model, filepath, signatures, options)
79
80 if not include_optimizer:
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/saved_model/save.py in save(obj, export_dir, signatures, options)
897 # Note we run this twice since, while constructing the view the first time
898 # there can be side effects of creating variables.
--> 899 _ = _SaveableView(checkpoint_graph_view)
900 saveable_view = _SaveableView(checkpoint_graph_view)
901
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/saved_model/save.py in __init__(self, checkpoint_view)
185 # variables on first run.
186 concrete_functions = (
--> 187 function._list_all_concrete_functions_for_serialization()) # pylint: disable=protected-access
188 else:
189 concrete_functions = [function]
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in _list_all_concrete_functions_for_serialization(self)
797 """
798 if self.input_signature is not None:
--> 799 self.get_concrete_function()
800 concrete_functions = []
801 # pylint: disable=protected-access
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in get_concrete_function(self, *args, **kwargs)
907 if self._stateful_fn is None:
908 initializers = []
--> 909 self._initialize(args, kwargs, add_initializers_to=initializers)
910 self._initialize_uninitialized_variables(initializers)
911
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
495 self._concrete_stateful_fn = (
496 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 497 *args, **kwds))
498
499 def invalid_creator_scope(*unused_args, **unused_kwds):
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2387 args, kwargs = None, None
2388 with self._lock:
-> 2389 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2390 return graph_function
2391
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2701
2702 self._function_cache.missed.add(call_context_key)
-> 2703 graph_function = self._create_graph_function(args, kwargs)
2704 self._function_cache.primary[cache_key] = graph_function
2705 return graph_function, args, kwargs
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2591 arg_names=arg_names,
2592 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2593 capture_by_value=self._capture_by_value),
2594 self._function_attributes,
2595 # Tell the ConcreteFunction to clean up its graph once it goes out of
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
976 converted_func)
977
--> 978 func_outputs = python_func(*func_args, **func_kwargs)
979
980 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
437 # __wrapped__ allows AutoGraph to swap in a converted function. We give
438 # the function a weak reference to itself to avoid a reference cycle.
--> 439 return weak_wrapped_fn().__wrapped__(*args, **kwds)
440 weak_wrapped_fn = weakref.ref(wrapped_fn)
441
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/training/tracking/tracking.py in _initializer()
242 @def_function.function(input_signature=[], autograph=False)
243 def _initializer():
--> 244 self._initialize()
245 return 1 # Dummy return
246
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/lookup_ops.py in _initialize(self)
179
180 def _initialize(self):
--> 181 return self._initializer.initialize(self)
182
183 @property
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/lookup_ops.py in initialize(self, table)
462 self._name, values=(table.resource_handle, self._keys, self._values)):
463 init_op = gen_lookup_ops.lookup_table_import_v2(table.resource_handle,
--> 464 self._keys, self._values)
465 ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
466 return init_op
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/ops/gen_lookup_ops.py in lookup_table_import_v2(table_handle, keys, values, name)
700 _, _, _op, _outputs = _op_def_library._apply_op_helper(
701 "LookupTableImportV2", table_handle=table_handle, keys=keys,
--> 702 values=values, name=name)
703 return _op
704 LookupTableImportV2 = tf_export("raw_ops.LookupTableImportV2")(_ops.to_raw_op(lookup_table_import_v2))
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/op_def_library.py in _apply_op_helper(op_type_name, name, **keywords)
740 op = g._create_op_internal(op_type_name, inputs, dtypes=None,
741 name=scope, input_types=input_types,
--> 742 attrs=attr_protos, op_def=op_def)
743
744 # `outputs` is returned as a separate return value so that the output
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/func_graph.py in _create_op_internal(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_device)
589 if ctxt is not None and hasattr(ctxt, "AddValue"):
590 inp = ctxt.AddValue(inp)
--> 591 inp = self.capture(inp)
592 inputs[i] = inp
593 return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/func_graph.py in capture(self, tensor, name, shape)
628 # Large EagerTensors and resources are captured with Placeholder ops
629 return self._capture_helper(tensor, name, shape)
--> 630 if tensor.graph is not self:
631 if name is None:
632 name = tensor.op.name
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/ops.py in graph(self)
414 def graph(self):
415 """The `Graph` that contains this tensor."""
--> 416 return self._op.graph
417
418 @property
/usr/local/lib/python3.5/dist-packages/tensorflow_core/python/framework/ops.py in graph(self)
2223 def graph(self):
2224 """The `Graph` that contains this operation."""
-> 2225 return self._graph
2226
2227 @property
AttributeError: 'Operation' object has no attribute '_graph'
Any help would be appreciated.
I have recreated your code and run it successfully without any errors. Kindly use the latest stable release of TensorFlow, in this case 2.1.0.
If the problem still persists, try updating your Jupyter Notebook and Python (to the latest supported version), or try reproducing it first in Google Colab to ensure that it is not related to any hardware issue.
Screenshot of successful execution:
Here is my code:
import tensorflow as tf
print('TensorFlow Version: {}'.format(tf.__version__))

# Synthetic data
x, y = tf.random.uniform(shape=(1000, 7, 1), maxval=1), tf.random.uniform(shape=(1000, 7, 1), maxval=1)

# Model
feature_layer = tf.keras.layers.Input(shape=(7, 1))
model = tf.keras.models.Sequential([
    feature_layer,
    tf.keras.layers.Dense(128, activation='relu', input_shape=(7, 1)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(7, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adamax())
model.fit(x, y, epochs=1)

# 1st save
first_model_loc = '/tmp/tf_keras_1st_run/'
tf.keras.models.save_model(model, filepath=first_model_loc)
print('\nSuccessfully saved at {}\n\n'.format(first_model_loc))  # Progress checking

# Clear the session, reset the graph, and delete the model, as in the question
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
del model

# Model (rebuilt from scratch)
feature_layer = tf.keras.layers.Input(shape=(7, 1))
model = tf.keras.models.Sequential([
    feature_layer,
    tf.keras.layers.Dense(128, activation='relu', input_shape=(7, 1)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(7, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adamax())
model.fit(x, y, epochs=1)

# 2nd save
second_model_loc = '/tmp/tf_keras_2nd_run/'
tf.keras.models.save_model(model, filepath=second_model_loc)
print('\nSuccessfully saved at {}'.format(second_model_loc))  # Progress checking