I want to evaluate the ROC AUC for my multiclass sequential Keras model using the multiclass_roc_auc_score function, but my code raised ValueError: Shapes (None, 1) and (None, 4) are incompatible.
I want to perform multiclass classification:
class MulticlassTruePositives(tf.keras.metrics.Metric):
    def __init__(self, name='multiclass_true_positives', **kwargs):
        super(MulticlassTruePositives, self).__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1))
        values = tf.cast(y_true, 'int32') == tf.cast(y_pred, 'int32')
        values = tf.cast(values, 'float32')
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, 'float32')
            values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
        return self.true_positives

    def reset_states(self):
        # The state of the metric will be reset at the start of each epoch.
        self.true_positives.assign(0.)
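    # Note added for clarity (not in the original post): in newer TensorFlow
    # releases this hook is named reset_state (singular); the sample code in
    # the answer further down defines it under that name.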
I compile the Keras model with the metrics:
# Report the AUC of a model outputting a probability.
hypermodel.compile(optimizer='sgd',
                   loss=tf.keras.losses.CategoricalCrossentropy(),
                   metrics=[tf.keras.metrics.AUC(), MulticlassTruePositives()])
I implement a Keras callback that plots the ROC curve and the confusion matrix to a folder:
class PerformanceVisualizationCallback(Callback):
    def __init__(self, model, test_data, image_dir):
        super().__init__()
        self.model = model
        self.test_data = test_data
        os.makedirs(image_dir, exist_ok=True)
        self.image_dir = image_dir

    def on_epoch_end(self, epoch, logs={}):
        y_pred = np.asarray(self.model.predict(self.test_data[0]))
        y_true = self.test_data[1]
        y_pred_class = np.argmax(y_pred, axis=1)

        # plot and save confusion matrix
        fig, ax = plt.subplots(figsize=(16, 12))
        plot_confusion_matrix(y_true, y_pred_class, ax=ax)
        fig.savefig(os.path.join(self.image_dir, f'confusion_matrix_epoch_{epoch}'))

        # plot and save roc curve
        fig, ax = plt.subplots(figsize=(16, 12))
        plot_roc(y_true, y_pred, ax=ax)
        fig.savefig(os.path.join(self.image_dir, f'roc_curve_epoch_{epoch}'))

performance_viz_cbk = PerformanceVisualizationCallback(
    model=model,
    test_data=X_test,
    image_dir='perorfmance_charts')

history = hypermodel.fit(x=X_train,
                         y=y_train,
                         epochs=5,
                         callbacks=[performance_viz_cbk])
Traceback:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_17/963709483.py in <module>
2 y=y_train,
3 epochs=5,
----> 4 callbacks=[performance_viz_cbk])
/opt/conda/lib/python3.7/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1182 _r=1):
1183 callbacks.on_train_batch_begin(step)
-> 1184 tmp_logs = self.train_function(iterator)
1185 if data_handler.should_sync:
1186 context.async_wait()
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
883
884 with OptionalXlaContext(self._jit_compile):
--> 885 result = self._call(*args, **kwds)
886
887 new_tracing_count = self.experimental_get_tracing_count()
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
922 # In this case we have not created variables on the first call. So we can
923 # run the first trace but we should fail if variables are created.
--> 924 results = self._stateful_fn(*args, **kwds)
925 if self._created_variables and not ALLOW_DYNAMIC_VARIABLE_CREATION:
926 raise ValueError("Creating variables on a non-first call to a function"
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
3036 with self._lock:
3037 (graph_function,
-> 3038 filtered_flat_args) = self._maybe_define_function(args, kwargs)
3039 return graph_function._call_flat(
3040 filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3458 call_context_key in self._function_cache.missed):
3459 return self._define_function_with_shape_relaxation(
-> 3460 args, kwargs, flat_args, filtered_flat_args, cache_key_context)
3461
3462 self._function_cache.missed.add(call_context_key)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _define_function_with_shape_relaxation(self, args, kwargs, flat_args, filtered_flat_args, cache_key_context)
3380
3381 graph_function = self._create_graph_function(
-> 3382 args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
3383 self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
3384
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3306 arg_names=arg_names,
3307 override_flat_arg_shapes=override_flat_arg_shapes,
-> 3308 capture_by_value=self._capture_by_value),
3309 self._function_attributes,
3310 function_spec=self.function_spec,
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes, acd_record_initial_resource_uses)
1005 _, original_func = tf_decorator.unwrap(python_func)
1006
-> 1007 func_outputs = python_func(*func_args, **func_kwargs)
1008
1009 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
666 # the function a weak reference to itself to avoid a reference cycle.
667 with OptionalXlaContext(compile_with_xla):
--> 668 out = weak_wrapped_fn().__wrapped__(*args, **kwds)
669 return out
670
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
992 except Exception as e: # pylint:disable=broad-except
993 if hasattr(e, "ag_error_metadata"):
--> 994 raise e.ag_error_metadata.to_exception(e)
995 else:
996 raise
ValueError: in user code:
/opt/conda/lib/python3.7/site-packages/keras/engine/training.py:853 train_function *
return step_function(self, iterator)
/opt/conda/lib/python3.7/site-packages/keras/engine/training.py:842 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/opt/conda/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
/opt/conda/lib/python3.7/site-packages/keras/engine/training.py:835 run_step **
outputs = model.train_step(data)
/opt/conda/lib/python3.7/site-packages/keras/engine/training.py:789 train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
/opt/conda/lib/python3.7/site-packages/keras/engine/compile_utils.py:201 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/opt/conda/lib/python3.7/site-packages/keras/losses.py:141 __call__
losses = call_fn(y_true, y_pred)
/opt/conda/lib/python3.7/site-packages/keras/losses.py:245 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/opt/conda/lib/python3.7/site-packages/keras/losses.py:1666 categorical_crossentropy
y_true, y_pred, from_logits=from_logits, axis=axis)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/opt/conda/lib/python3.7/site-packages/keras/backend.py:4839 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/tensor_shape.py:1161 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 1) and (None, 4) are incompatible
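For context before the answer below: this mismatch is what CategoricalCrossentropy raises when the labels arrive as integer class IDs of shape (None, 1) while the network emits a 4-way softmax of shape (None, 4). A minimal sketch of the two standard fixes, assuming 4 classes and integer labels (variable names follow the question):

import tensorflow as tf

# Option 1: keep integer labels and switch to the sparse loss.
hypermodel.compile(optimizer='sgd',
                   loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                   metrics=['accuracy'])

# Option 2: keep CategoricalCrossentropy and one-hot encode the labels.
y_train = tf.one_hot(tf.reshape(y_train, [-1]), depth=4)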
I took the time to make your display graphs work with my sample data.
First of all, you need to create an output dimension that matches your display calculations; check the loss function, the optimizer, the metrics, and the network output shape together. Then you need to make sense of the prediction data: the display cannot take multiple input values directly, so I took the time to build a sample where the target is meaningful. A max or a sum over the window is a meaningful label here; a sum of all the maxima is not, since everything would then get the same priority in my queue.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_text as tft
import matplotlib.pyplot as plt
import sklearn.metrics
from sklearn.svm import SVC
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
list_accuracy = []
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
input_word = tf.constant(' \'Cause it\'s easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Slipping outta your hand into the dirt Easy as an ice cream sundae Every dancer gets a little hurt Easy as an ice cream sundae Oh, easy as an ice cream sundae ')
dataset = tf.data.Dataset.from_tensors( tf.strings.bytes_split(input_word) )
window_size = 6
dataset = dataset.map( lambda x: tft.sliding_window(x, width=window_size, axis=0) ).flat_map(tf.data.Dataset.from_tensor_slices)
dataset = dataset.batch(1)
list_word = []
label = []
vocab = [ "a", "b", "c", "d", "e", "f", "g", "h", "I", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "_" ]
vocab_hot = [ "ice" ]
layer = tf.keras.layers.StringLookup(vocabulary=vocab)
layer_hot = tf.keras.layers.StringLookup(vocabulary=vocab_hot)
for example in dataset.take(200):
    sequences_mapping_string = layer(example[0])
    sequences_mapping_string = tf.constant(sequences_mapping_string, shape=(1, 6))
    list_word.append(sequences_mapping_string.numpy())
    sequences_mapping_string = tf.reduce_sum(layer_hot(example[0][0] + example[0][1] + example[0][2]))
    sequences_mapping_string = tf.constant(sequences_mapping_string, shape=(1, 1))
    label.append(sequences_mapping_string.numpy())
list_word = tf.constant(list_word, shape=(200, 1, 6, 1), dtype=tf.int64)
label = tf.constant(label, shape=(200, 1, 1, 1), dtype=tf.int64)
dataset = tf.data.Dataset.from_tensor_slices((list_word, label))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Definition
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MulticlassTruePositives(tf.keras.metrics.Metric):
    def __init__(self, name='multiclass_true_positives', **kwargs):
        super(MulticlassTruePositives, self).__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1))
        values = tf.cast(y_true, 'int32') == tf.cast(y_pred, 'int32')
        values = tf.cast(values, 'float32')
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, 'float32')
            values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
        return self.true_positives

    def reset_state(self):
        # The state of the metric will be reset at the start of each epoch.
        self.true_positives.assign(0.)
class MyLSTMLayer(tf.keras.layers.LSTM):
    def __init__(self, units, return_sequences, return_state):
        super(MyLSTMLayer, self).__init__(units, return_sequences=True, return_state=False)
        self.num_units = units

    def build(self, input_shape):
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]), self.num_units])

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
mycustomlayer = MyLSTMLayer( 64, True, False )
mycustomlayer_2 = MyLSTMLayer( 16, True, False )
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=(6, 1)),
    tf.keras.layers.Embedding(1000, 128, input_length=1),
    tf.keras.layers.Reshape((6, 128)),
    tf.keras.layers.SpatialDropout1D(rate=0.4),
    tf.keras.layers.Conv1D(32, 6, activation="relu"),
    tf.keras.layers.MaxPooling1D(strides=1, pool_size=1),
    ### LSTM
    mycustomlayer,
    tf.keras.layers.Reshape((1, 1, 64)),
    tf.keras.layers.UpSampling2D(size=(4, 4), data_format=None, interpolation='nearest'),
    tf.keras.layers.Conv1D(16, 3, activation="relu"),
    tf.keras.layers.Reshape((8, 16)),
    tf.keras.layers.MaxPooling1D(),
    tf.keras.layers.GlobalMaxPooling1D(),
    ### LSTM
    tf.keras.layers.Reshape((1, 16)),
    mycustomlayer_2,
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1),
], name="MyModelClassification")
model.build()
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class PerformanceVisualizationCallback(tf.keras.callbacks.Callback):
    def __init__(self, model, test_data, image_dir):
        super().__init__()
        self.model = model
        self.test_data = test_data
        self.test_data = tf.constant(self.test_data, shape=(20, 1, 6, 1))
        os.makedirs(image_dir, exist_ok=True)
        self.image_dir = image_dir

    def on_epoch_end(self, epoch, logs={}):
        y_pred = tf.constant(self.model.predict(self.test_data[0])).numpy()
        y_true = self.test_data[1]
        y_pred_class = tf.math.argmax(y_pred, axis=1).numpy()

        clf = SVC(random_state=0)
        clf.fit(tf.constant(self.test_data, shape=(20, 6)).numpy(),
                tf.cast(tf.linspace(0, 19, 20, name='linspace', axis=0), dtype=tf.int64).numpy())
        predictions = clf.predict(tf.constant(self.test_data, shape=(20, 6)).numpy())
        cm = sklearn.metrics.confusion_matrix(
            [tf.math.argmax(self.test_data[1], axis=1).numpy()[0],
             tf.math.argmax(self.test_data[2], axis=1).numpy()[0],
             tf.math.argmax(self.test_data[3], axis=1).numpy()[0],
             tf.math.argmax(self.test_data[4], axis=1).numpy()[0],
             tf.math.argmax(self.test_data[5], axis=1).numpy()[0]],
            [1, 2, 3, 4, 5], labels=clf.classes_)
        disp = sklearn.metrics.ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=clf.classes_)
        disp.plot()
        plt.show()
        # fig.savefig(os.path.join(self.image_dir, f'confusion_matrix_epoch_{epoch}'))

        clf = sklearn.svm.SVC(random_state=0)
        clf.fit(tf.constant(self.test_data, shape=(20, 6)).numpy(),
                tf.linspace(0, 19, 20, name='linspace', axis=0).numpy())

        if (epoch <= 2):
            list_accuracy.append(logs['accuracy'])
        if (epoch == 2):
            fpr, tpr, thresholds = sklearn.metrics.roc_curve([0, 0, 1, 1], [0, list_accuracy[0], list_accuracy[1], list_accuracy[2]])
            roc_auc = sklearn.metrics.auc(fpr, tpr)
            display = sklearn.metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name='example estimator')
            display.plot()
            plt.show()
            # fig.savefig(os.path.join(self.image_dir, f'roc_curve_epoch_{epoch}'))
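            # Assumption, not part of the original answer: to actually write the
            # chart into image_dir instead of only showing it, the Matplotlib
            # figure that plot() creates is exposed by scikit-learn as
            # display.figure_, so one could do:
            # display.figure_.savefig(os.path.join(self.image_dir, f'roc_curve_epoch_{epoch}'))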
list_word = []
label = []
test_dataset = tf.data.Dataset.from_tensors( tf.strings.bytes_split(input_word) )
test_dataset = test_dataset.map( lambda x: tft.sliding_window(x, width=window_size, axis=0) ).flat_map(tf.data.Dataset.from_tensor_slices)
test_dataset = test_dataset.batch(1)
for example in test_dataset.take(20):
    sequences_mapping_string = layer(example[0])
    sequences_mapping_string = tf.constant(sequences_mapping_string, shape=(1, 6))
    list_word.append(sequences_mapping_string.numpy())
    sequences_mapping_string = tf.reduce_sum(layer_hot(example[0][0] + example[0][1] + example[0][2]))
    sequences_mapping_string = tf.constant(sequences_mapping_string, shape=(1, 1))
    label.append(sequences_mapping_string.numpy())
list_word = tf.constant(list_word, shape=(20, 1, 6, 1), dtype=tf.int64)
label = tf.constant(label, shape=(20, 1, 1, 1), dtype=tf.int64)
test_dataset = tf.data.Dataset.from_tensor_slices((list_word, label))
performance_viz_cbk = PerformanceVisualizationCallback(
    model=model,
    test_data=list_word,
    image_dir='c:\perorfmance_charts')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.SGD(
    learning_rate=0.000001,
    momentum=0.5,
    nesterov=True,
    name='SGD',
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.BinaryCrossentropy(
    from_logits=False,
    reduction=tf.keras.losses.Reduction.AUTO,
    name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Report the AUC of a model outputting a probability.
model.compile(optimizer=optimizer, loss=lossfn,
              metrics=['accuracy', tf.keras.metrics.AUC(), MulticlassTruePositives()])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(dataset, batch_size=100, epochs=3, callbacks=[performance_viz_cbk] )
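Since the original goal was a multiclass ROC AUC, note that scikit-learn can compute it directly; a minimal sketch, assuming y_true holds integer class labels and y_prob is the (n_samples, 4) array of softmax outputs (both names are placeholders, not from the code above):

import sklearn.metrics

# One-vs-rest multiclass ROC AUC computed from predicted probabilities.
auc = sklearn.metrics.roc_auc_score(y_true, y_prob, multi_class='ovr')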
Related
I am working on an image dataset with 12 categorical classes. I am using transfer learning with VGG16. However, I have faced an error: Shapes (None, None) and (None, 28, 28, 12) are incompatible.
My code:
IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32

train = ImageDataGenerator()
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255, fill_mode='nearest')
train_data = train_generator.flow_from_directory(directory="path", target_size=IMAGE_SHAPE, color_mode="rgb", class_mode='categorical', batch_size=BATCH_SIZE, shuffle=True)

valid = ImageDataGenerator()
validation_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
valid_data = validation_generator.flow_from_directory(directory="path", target_size=IMAGE_SHAPE, color_mode="rgb", class_mode='categorical', batch_size=BATCH_SIZE, shuffle=True)

test = ImageDataGenerator()
test_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
test_data = test_generator.flow_from_directory(directory='path', target_size=IMAGE_SHAPE, color_mode="rgb", class_mode='categorical', batch_size=1, shuffle=False)
test_data.reset()

from keras.applications.vgg19 import VGG19
vggmodel = VGG19(weights='imagenet', include_top=True)

for layers in (vggmodel.layers)[:32]:
    print(layers)
    layers.trainable = False

import tensorflow as tf
tf.keras.Model
tf.keras.layers.Dense
from keras import optimizers
tf.keras.preprocessing.image.ImageDataGenerator
tf.keras.preprocessing.image.load_img

X = vggmodel.layers[-12].output
flatten = tf.keras.layers.Flatten()(X)
out = vggmodel.layers(flatten)
predictions = tf.keras.layers.Dense(num_classes, activation='softmax')
model_final = Model(vggmodel.input, predictions)
model_final.compile(optimizer=optimizers.Adam(lr=0.0003), loss='categorical_crossentropy', metrics=["accuracy"])

for image_batch, labels_batch in train_data:
    print(image_batch.shape)
    print(labels_batch.shape)
    break

from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', patience=40, verbose=1, mode='auto')
model_final.fit_generator(generator=train_data, steps_per_epoch=2, epochs=100, validation_data=valid_data, validation_steps=1, callbacks=[checkpoint, early])
model_final.save_weights("vgg16_1.h5")
Error: ValueError: Shapes (None, None) and (None, 28, 28, 12) are incompatible
Error details:
ValueError Traceback (most recent call last)
<ipython-input-39-938295cc34c4> in <module>()
2 checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
3 early = EarlyStopping(monitor='val_acc', min_delta=0, patience=40, verbose=1, mode='auto')
----> 4 model_final.fit_generator(generator= train_images , steps_per_epoch= 2, epochs= 100, validation_data= val_images , validation_steps=1, callbacks=[checkpoint,early])
5 model_final.save_weights("vgg16_1.h5")
10 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
984 except Exception as e: # pylint:disable=broad-except
985 if hasattr(e, "ag_error_metadata"):
--> 986 raise e.ag_error_metadata.to_exception(e)
987 else:
988 raise
ValueError: in user code:
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:830 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:813 run_step *
outputs = model.train_step(data)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:771 train_step *
loss = self.compiled_loss(
/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py:201 __call__ *
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/usr/local/lib/python3.7/dist-packages/keras/losses.py:142 __call__ *
losses = call_fn(y_true, y_pred)
/usr/local/lib/python3.7/dist-packages/keras/losses.py:246 call *
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper **
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/losses.py:1631 categorical_crossentropy
y_true, y_pred, from_logits=from_logits)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/backend.py:4827 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/tensor_shape.py:1161 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, None) and (None, 28, 28, 12) are incompatible
Updated Code:
image_dir = Path('../content/dataset')

# Get filepaths and labels
filepaths = list(image_dir.glob(r'**/*.png'))
labels = list(map(lambda x: os.path.split(os.path.split(x)[0])[1], filepaths))

def create_gen():
    # Load the images with a generator and data augmentation
    train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        preprocessing_function=tf.keras.applications.vgg16.preprocess_input,
        validation_split=0.1
    )
    test_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        preprocessing_function=tf.keras.applications.vgg16.preprocess_input
    )
    train_images = train_generator.flow_from_dataframe(
        dataframe=train_df,
        x_col='Filepath',
        y_col='Label',
        target_size=(224, 224),
        color_mode='rgb',
        class_mode='categorical',
        batch_size=32,
        shuffle=True,
        seed=0,
        subset='training',
        rotation_range=30,  # Uncomment to use data augmentation
        zoom_range=0.15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.15,
        horizontal_flip=True,
        fill_mode="nearest"
    )
    val_images = train_generator.flow_from_dataframe(
        dataframe=train_df,
        x_col='Filepath',
        y_col='Label',
        target_size=(224, 224),
        color_mode='rgb',
        class_mode='categorical',
        batch_size=32,
        shuffle=True,
        seed=0,
        subset='validation',
        rotation_range=30,  # Uncomment to use data augmentation
        zoom_range=0.15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.15,
        horizontal_flip=True,
        fill_mode="nearest"
    )
    test_images = test_generator.flow_from_dataframe(
        dataframe=test_df,
        x_col='Filepath',
        y_col='Label',
        target_size=(224, 224),
        color_mode='rgb',
        class_mode='categorical',
        batch_size=32,
        shuffle=False
    )
    return train_generator, test_generator, train_images, val_images, test_images

from keras.applications.vgg16 import VGG16
vggmodel = VGG16(weights='imagenet', include_top=True)

for layers in (vggmodel.layers)[:256]:
    print(layers)
    layers.trainable = False

X = vggmodel.layers[-12].output
predictions = Dense(12, activation="softmax")(X)
model_final = Model(vggmodel.input, predictions)
model_final.compile(optimizer=optimizers.Adam(lr=0.0003), loss='categorical_crossentropy', metrics=["accuracy"])

# Separate in train and test data
train_df, test_df = train_test_split(image_df, train_size=0.9, shuffle=True, random_state=1)

# Create the generators
train_generator, test_generator, train_images, val_images, test_images = create_gen()

from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=40, verbose=1, mode='auto')
model_final.fit_generator(generator=train_images, steps_per_epoch=2, epochs=100, validation_data=val_images, validation_steps=1, callbacks=[checkpoint, early])
model_final.save_weights("vgg16_1.h5")
Error details:
ValueError Traceback (most recent call last)
<ipython-input-56-5210d7f2da32> in <module>()
2 checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
3 early = EarlyStopping(monitor='val_acc', min_delta=0, patience=40, verbose=1, mode='auto')
----> 4 model_final.fit_generator(generator= train_images, steps_per_epoch= 2, epochs= 100, validation_data= val_images, validation_steps=1, callbacks=[checkpoint,early])
5 model_final.save_weights("vgg16_1.h5")
10 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
984 except Exception as e: # pylint:disable=broad-except
985 if hasattr(e, "ag_error_metadata"):
--> 986 raise e.ag_error_metadata.to_exception(e)
987 else:
988 raise
ValueError: in user code:
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:830 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:813 run_step *
outputs = model.train_step(data)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:771 train_step *
loss = self.compiled_loss(
/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py:201 __call__ *
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/usr/local/lib/python3.7/dist-packages/keras/losses.py:142 __call__ *
losses = call_fn(y_true, y_pred)
/usr/local/lib/python3.7/dist-packages/keras/losses.py:246 call *
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper **
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/losses.py:1631 categorical_crossentropy
y_true, y_pred, from_logits=from_logits)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/backend.py:4827 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/tensor_shape.py:1161 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, None) and (None, 28, 28, 12) are incompatible
There are many small errors in your code:
You are using the string "path" instead of a path variable in your generators.
Also, the train, validation and test paths should be different.
You have not specified input_tensor for the VGG19 model.
Your piece of code should be like this:
#train_dir_path is path to your training images
train_data = train_generator.flow_from_directory(directory=train_dir_path,target_size=IMAGE_SHAPE , color_mode="rgb" , class_mode='categorical', batch_size=BATCH_SIZE , shuffle = True )
#valid_dir_path is path to your validation images
valid_data = validation_generator.flow_from_directory(directory=valid_dir_path, target_size=IMAGE_SHAPE , color_mode="rgb" , class_mode='categorical' , batch_size=BATCH_SIZE , shuffle = True )
The output of the VGG model should be flattened before being passed to the Dense layer; without Flatten, the Dense layer acts on a 4-D feature map and yields the (None, 28, 28, 12) output shape seen in the error.
Full code:
IMAGE_SHAPE = (224, 224)
BATCH_SIZE = 32
train = ImageDataGenerator()
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale= 1./255, fill_mode= 'nearest')
#train_dir_path is path to your training images
train_data = train_generator.flow_from_directory(directory=train_dir_path,target_size=IMAGE_SHAPE , color_mode="rgb" , class_mode='categorical', batch_size=BATCH_SIZE , shuffle = True )
valid = ImageDataGenerator()
validation_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
#valid_dir_path is path to your validation images
valid_data = validation_generator.flow_from_directory(directory=valid_dir_path, target_size=IMAGE_SHAPE , color_mode="rgb" , class_mode='categorical' , batch_size=BATCH_SIZE , shuffle = True )
test = ImageDataGenerator()
test_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
#test_dir_path is path to your test images
test_data = test_generator.flow_from_directory(directory=test_dir_path,target_size=IMAGE_SHAPE , color_mode="rgb" , class_mode='categorical' , batch_size=1 , shuffle = False )
test_data.reset()
from keras.applications.vgg19 import VGG19
vggmodel = VGG19(weights='imagenet', include_top=True, input_tensor=tensorflow.keras.layers.Input(shape=(224,224,3)))
for layers in (vggmodel.layers)[:32]:
    print(layers)
    layers.trainable = False
X= vggmodel.layers[-12].output
X = tensorflow.keras.layers.Flatten()(X)
predictions = Dense(12, activation="softmax")(X)
model_final = Model(vggmodel.input, predictions)
model_final.compile(optimizer = optimizers.Adam(lr=0.0003), loss='categorical_crossentropy', metrics=["accuracy"])
for image_batch, labels_batch in train_data:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc',patience=40, verbose=1, mode='auto')
model_final.fit_generator(generator= train_data, steps_per_epoch= 2, epochs= 100, validation_data= valid_data, validation_steps=1, callbacks=[checkpoint,early])
model_final.save_weights("vgg16_1.h5")
I'm very new to Tensorflow/Keras and deep learning, so my apologies in advance.
I'm creating a basic mixed convolutional neural net to classify images and metadata. I've created the following using the Keras Functional API:
# Define inputs
meta_inputs = tf.keras.Input(shape=(2065,))
img_inputs = tf.keras.Input(shape=(80,120,3,))
# Model 1
meta_layer1 = tf.keras.layers.Dense(64, activation='relu')(meta_inputs)
meta_output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(meta_layer1)
# Model 2
img_conv_layer1 = tf.keras.layers.Conv2D(32, kernel_size=(3,3), padding='same', activation='relu')(img_inputs)
img_pooling_layer1 = tf.keras.layers.MaxPooling2D()(img_conv_layer1)
img_conv_layer2 = tf.keras.layers.Conv2D(64, kernel_size=(3,3), padding='same', activation='relu')(img_pooling_layer1)
img_pooling_layer2 = tf.keras.layers.MaxPooling2D()(img_conv_layer2)
img_flatten_layer = tf.keras.layers.Flatten()(img_pooling_layer2)
img_dense_layer = tf.keras.layers.Dense(1024, activation='relu')(img_flatten_layer)
img_output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(img_dense_layer)
# Merge models
merged = tf.keras.layers.add([meta_output_layer, img_output_layer])
# Define functional model
model = tf.keras.Model(inputs=[meta_inputs, img_inputs], outputs=merged)
# Compile model
auc = tf.keras.metrics.AUC(name = 'auc')
model.compile('adam', loss='binary_crossentropy', metrics=[auc])
I then proceed to fit the model:
epochs = 15
history = model.fit([meta_train, img_train], y_train, epochs=epochs, batch_size=500, validation_data=([meta_test, img_test], y_test))
This produces an error, and I'm quite frankly not sure what to do with it:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-11-5ec0cf9ac1d1> in <module>
1 epochs = 15
----> 2 history = model.fit([meta_train, img_train], y_train, epochs=epochs, batch_size=500, validation_data=([meta_test, img_test], y_test))
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1098 _r=1):
1099 callbacks.on_train_batch_begin(step)
-> 1100 tmp_logs = self.train_function(iterator)
1101 if data_handler.should_sync:
1102 context.async_wait()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
826 tracing_count = self.experimental_get_tracing_count()
827 with trace.Trace(self._name) as tm:
--> 828 result = self._call(*args, **kwds)
829 compiler = "xla" if self._experimental_compile else "nonXla"
830 new_tracing_count = self.experimental_get_tracing_count()
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
886 # Lifting succeeded, so variables are initialized and we can run the
887 # stateless function.
--> 888 return self._stateless_fn(*args, **kwds)
889 else:
890 _, _, _, filtered_flat_args = \
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
2940 (graph_function,
2941 filtered_flat_args) = self._maybe_define_function(args, kwargs)
-> 2942 return graph_function._call_flat(
2943 filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
2944
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1916 and executing_eagerly):
1917 # No tape is watching; skip to running the function.
-> 1918 return self._build_call_outputs(self._inference_function.call(
1919 ctx, args, cancellation_manager=cancellation_manager))
1920 forward_backward = self._select_forward_and_backward_functions(
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py in call(self, ctx, args, cancellation_manager)
553 with _InterpolateFunctionError(self):
554 if cancellation_manager is None:
--> 555 outputs = execute.execute(
556 str(self.signature.name),
557 num_outputs=self._num_outputs,
~/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
InvalidArgumentError: assertion failed: [predictions must be <= 1] [Condition x <= y did not hold element-wise:] [x (model/add/add:0) = ] [[1.47704351][1.48876262][1.50816929]...] [y (Cast_4/x:0) = ] [1]
[[{{node assert_less_equal/Assert/AssertGuard/else/_11/assert_less_equal/Assert/AssertGuard/Assert}}]] [Op:__inference_train_function_1331]
Function call stack:
train_function
Being new to Tensorflow, Keras, and deep learning in general, I'm not even sure where to begin diagnosing the issue. I don't know what [predictions must be <= 1] [Condition x <= y did not hold element-wise:] is referring to, for example: y_train is an array of 1s and 0s, and should hold to that assertion. I've even tried reshaping it to a (N,1)-D array, to no effect.
Can anyone point me in the right direction on this?
The AUC metric needs probabilities in [0, 1].
In your model this does not hold, because of the sum in the merged layer: adding two sigmoid outputs can produce values up to 2, which is exactly what the assertion complains about. You can solve it, for example, by using an average instead of a sum:
merged = tf.keras.layers.Average()([meta_output_layer, img_output_layer])
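Another common pattern, sketched here only as an illustration (the final Dense head is my addition, not from the question), is to merge intermediate features and let a single sigmoid unit produce the probability:

# Concatenate the penultimate features of both branches and finish with one
# sigmoid unit, so the output is guaranteed to lie in [0, 1].
merged_features = tf.keras.layers.concatenate([meta_layer1, img_dense_layer])
merged = tf.keras.layers.Dense(1, activation='sigmoid')(merged_features)
model = tf.keras.Model(inputs=[meta_inputs, img_inputs], outputs=merged)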
I am new to machine learning and deep learning. I followed a tutorial on convolutional neural networks, but the tutorial was for binary classification. I have now tried my own version on a categorical dataset with a few things changed, and I am getting this error.
My code:
import tensorflow as tf
from keras.preprocessing import image
train_datagen = image.ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
training_set = train_datagen.flow_from_directory(
    'datasets/training_data/',
    target_size=(64, 64),
    batch_size=32,
    class_mode='categorical'
)
test_datagen = image.ImageDataGenerator(
    rescale=1./255,
)
test_set = test_datagen.flow_from_directory(
    'datasets/testing_data/',
    target_size=(64, 64),
    batch_size=32,
    class_mode='categorical'
)

cnn = tf.keras.models.Sequential()
cnn.add(tf.keras.layers.Conv2D(
    filters=32,
    kernel_size=3,
    activation='relu',
    input_shape=[64, 64, 3]
))
cnn.add(tf.keras.layers.MaxPool2D(
    pool_size=2,
    strides=2
))
cnn.add(tf.keras.layers.Conv2D(
    filters=32,
    kernel_size=3,
    activation='relu'
))
cnn.add(tf.keras.layers.MaxPool2D(
    pool_size=2,
    strides=2
))
cnn.add(tf.keras.layers.Flatten())
cnn.add(tf.keras.layers.Dense(
    units=128,
    activation='relu'
))
cnn.add(tf.keras.layers.Dense(
    units=1,
    activation='softmax'
))

cnn.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

cnn.fit(x=training_set,
        validation_data=test_set,
        epochs=25
)
Error:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-17-52da1b2b0cd1> in <module>
----> 1 cnn.fit(x = training_set,
2 validation_data = test_set,
3 epochs = 25)
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
805 # In this case we have created variables on the first call, so we run the
806 # defunned version which is guaranteed to never create variables.
--> 807 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
808 elif self._stateful_fn is not None:
809 # Release the lock early so that multiple threads can perform the call
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2827 with self._lock:
2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
2831 #property
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _filtered_call(self, args, kwargs, cancellation_manager)
1841 `args` and `kwargs`.
1842 """
-> 1843 return self._call_flat(
1844 [t for t in nest.flatten((args, kwargs), expand_composites=True)
1845 if isinstance(t, (ops.Tensor,
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1921 and executing_eagerly):
1922 # No tape is watching; skip to running the function.
-> 1923 return self._build_call_outputs(self._inference_function.call(
1924 ctx, args, cancellation_manager=cancellation_manager))
1925 forward_backward = self._select_forward_and_backward_functions(
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
543 with _InterpolateFunctionError(self):
544 if cancellation_manager is None:
--> 545 outputs = execute.execute(
546 str(self.signature.name),
547 num_outputs=self._num_outputs,
~\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
InvalidArgumentError: Matrix size-incompatible: In[0]: [32,6], In[1]: [128,1]
[[node gradient_tape/sequential/dense_1/MatMul (defined at <ipython-input-16-c714df782bf1>:1) ]] [Op:__inference_train_function_798]
Function call stack:
train_function
If anyone can help me find the solution, that would be awesome. Thank you.
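A hedged observation rather than a confirmed answer: the shapes in the error ([32, 6] against [128, 1]) suggest the generators produce 6 one-hot classes while the final layer has a single unit, and softmax over a single unit is degenerate anyway. A minimal sketch of the likely fix, assuming the dataset really has 6 classes:

# Give the output layer one unit per class so it matches the one-hot labels.
cnn.add(tf.keras.layers.Dense(
    units=6,  # assumption: 6 classes, as implied by the [32, 6] label batch
    activation='softmax'
))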
When using Keras Tuner, there doesn't seem to be a way to allow the skipping of a problematic combination of hyperparams. For example, the number of filters in a Conv1D layer may not be compatible with all values of pool size in the following MaxPooling1D layer and thus lead to an error in model building. However, this may not be known before running the tuner. Once the tuner is run, this will lead to an error that will terminate the whole tuning process. Is there a way to skip any hyperparam combinations that result in an error?
Sample code:
def model_builder(hp):
    model = Sequential()
    model.add(
        Embedding(
            input_dim=hp.Int(
                'vocab_size',
                min_value=4000,
                max_value=10000,
                step=1000,
                default=4000
            ),
            output_dim=hp.Choice(
                'embedding_dim',
                values=[32, 64, 128, 256],
                default=32
            ),
            input_length=hp.Int(
                'max_length',
                min_value=50,
                max_value=200,
                step=10,
                default=50
            )
        )
    )
    model.add(
        Conv1D(
            filters=hp.Choice(
                'num_filters_1',
                values=[32, 64],
                default=32
            ),
            kernel_size=hp.Choice(
                'kernel_size_1',
                values=[3, 5, 7, 9],
                default=7
            ),
            activation='relu'
        )
    )
    model.add(
        MaxPooling1D(
            pool_size=hp.Choice(
                'pool_size',
                values=[3, 5],
                default=5
            )
        )
    )
    model.add(
        Conv1D(
            filters=hp.Choice(
                'num_filters_2',
                values=[32, 64],
                default=32
            ),
            kernel_size=hp.Choice(
                'kernel_size_2',
                values=[3, 5, 7, 9],
                default=7
            ),
            activation='relu'
        )
    )
    model.add(
        GlobalMaxPooling1D()
    )
    model.add(
        Dropout(
            rate=hp.Float(
                'dropout_1',
                min_value=0.0,
                max_value=0.5,
                default=0.5,
                step=0.05
            )
        )
    )
    model.add(
        Dense(
            units=hp.Int(
                'units',
                min_value=10,
                max_value=100,
                step=10,
                default=10
            ),
            kernel_regularizer=tf.keras.regularizers.l2(
                hp.Float(
                    'regularizer_1',
                    min_value=1e-4,
                    max_value=1e-1,
                    sampling='LOG',
                    default=1e-2
                )
            ),
            activation='relu'
        )
    )
    model.add(
        Dropout(
            hp.Float(
                'dropout_2',
                min_value=0.0,
                max_value=0.5,
                default=0.5,
                step=0.05
            )
        )
    )
    model.add(
        Dense(
            1,
            kernel_regularizer=tf.keras.regularizers.l2(
                hp.Float(
                    'regularizer_2',
                    min_value=1e-4,
                    max_value=1e-1,
                    sampling='LOG',
                    default=1e-2
                )
            ),
            activation='sigmoid'
        )
    )
    model.compile(
        loss='binary_crossentropy',
        optimizer=hp.Choice(
            'optimizer',
            values=['rmsprop', 'adam', 'sgd']
        ),
        metrics=['accuracy']
    )
    return model

tuner = kt.Hyperband(
    model_builder,
    objective='val_accuracy',
    max_epochs=20,
    # factor=3,
    directory='my_dir',
    project_name='cec',
    seed=seed
)

class ClearTrainingOutput(tf.keras.callbacks.Callback):
    def on_train_end(*args, **kwargs):
        IPython.display.clear_output(wait=True)

tuner.search(
    X_train,
    y_train,
    epochs=20,
    validation_data=(X_test, y_test),
    callbacks=[ClearTrainingOutput()]
)
The error message:
Epoch 1/3
WARNING:tensorflow:Model was constructed with shape (None, 150) for input Tensor("embedding_input:0", shape=(None, 150), dtype=float32), but it was called on an input with incompatible shape (32, 50).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-62-16a1eae457d8> in <module>
3 IPython.display.clear_output(wait=True)
4
----> 5 tuner.search(
6 X_train,
7 y_train,
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/engine/base_tuner.py in search(self, *fit_args, **fit_kwargs)
128
129 self.on_trial_begin(trial)
--> 130 self.run_trial(trial, *fit_args, **fit_kwargs)
131 self.on_trial_end(trial)
132 self.on_search_end()
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/tuners/hyperband.py in run_trial(self, trial, *fit_args, **fit_kwargs)
385 fit_kwargs['epochs'] = hp.values['tuner/epochs']
386 fit_kwargs['initial_epoch'] = hp.values['tuner/initial_epoch']
--> 387 super(Hyperband, self).run_trial(trial, *fit_args, **fit_kwargs)
388
389 def _build_model(self, hp):
~/anaconda3/envs/cec/lib/python3.8/site-packages/kerastuner/engine/multi_execution_tuner.py in run_trial(self, trial, *fit_args, **fit_kwargs)
94
95 model = self.hypermodel.build(trial.hyperparameters)
---> 96 history = model.fit(*fit_args, **copied_fit_kwargs)
97 for metric, epoch_values in history.history.items():
98 if self.oracle.objective.direction == 'min':
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
503 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
504 self._concrete_stateful_fn = (
--> 505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
506 *args, **kwds))
507
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2655 arg_names = base_arg_names + missing_arg_names
2656 graph_function = ConcreteFunction(
-> 2657 func_graph_module.func_graph_from_py_func(
2658 self._name,
2659 self._python_function,
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:951 run **
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
return fn(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:531 train_step **
y_pred = self(x, training=True)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:927 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/sequential.py:277 call
return super(Sequential, self).call(inputs, training=training, mask=mask)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:717 call
return self._run_internal_graph(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:888 _run_internal_graph
output_tensors = layer(computed_tensors, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:927 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/keras/layers/convolutional.py:207 call
outputs = self._convolution_op(inputs, self.kernel)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:1106 __call__
return self.conv_op(inp, filter)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:638 __call__
return self.call(inp, filter)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:231 __call__
return self.conv_op(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:220 _conv1d
return conv1d(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:574 new_func
return func(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:574 new_func
return func(*args, **kwargs)
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/nn_ops.py:1655 conv1d
result = gen_nn_ops.conv2d(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py:965 conv2d
_, _, _op, _outputs = _op_def_library._apply_op_helper(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/op_def_library.py:742 _apply_op_helper
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:593 _create_op_internal
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:3319 _create_op_internal
ret = Operation(
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1816 __init__
self._c_op = _create_c_op(self._graph, node_def, inputs,
/home/george/anaconda3/envs/cec/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1657 _create_c_op
raise ValueError(str(e))
ValueError: Negative dimension size caused by subtracting 7 from 6 for '{{node sequential/conv1d_1/conv1d}} = Conv2D[T=DT_FLOAT, data_format="NHWC", dilations=[1, 1, 1, 1], explicit_paddings=[], padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true](sequential/conv1d_1/conv1d/ExpandDims, sequential/conv1d_1/conv1d/ExpandDims_1)' with input shapes: [32,1,6,32], [1,7,32,32].
I myself was looking for a solution to this problem for a very long time, and I found one. Yes, it is not very elegant, but it works. I'll leave it here; maybe it will help someone else.
The idea is to wrap the model construction in a try-except block and, if a ValueError occurs, build a heavily simplified fallback model instead. The important part is to define your own loss function that returns an excessively large loss value, which we can then catch with our own callback function to stop training the model (example code for convolutional neural networks is attached).
P.S. One could also make the custom loss function return NaN and catch it with the ready-made callback TerminateOnNaN(). But for some reason keras-tuner treats NaN as less than any number, and it would therefore report NaN as the best val_loss.
def invalid_loss(y_true, y_pred):
    return keras.losses.BinaryCrossentropy()(y_true, y_pred) + 2000000

def invalid_model():
    model = keras.Sequential()
    model.add(layers.Input((input_shape)))
    model.add(layers.Resizing(height=2, width=2))
    model.add(layers.Conv2D(filters=1,
                            kernel_size=2,
                            activation='relu',
                            ))
    model.add(layers.GlobalMaxPooling2D())
    model.add(layers.Dense(units=output_shape,
                           activation="sigmoid",
                           ))
    model.compile(optimizer="Adam",
                  loss=invalid_loss,
                  metrics=[metrics.BinaryAccuracy()])
    return model

def build_model(hp):
    try:
        model = keras.Sequential()
        model.add(layers.Input((input_shape)))
        ...
        model.add(layers.Dense(units=output_shape,
                               activation=dense_activation))
        model.compile(optimizer="Adam",
                      loss=losses.BinaryCrossentropy(),
                      metrics=[metrics.BinaryAccuracy()])
    except ValueError:
        model = invalid_model()
    return model
And here is an example of a custom callback that stops training so as not to waste time on an "invalid" model:
class EarlyStoppingByLoss(keras.callbacks.Callback):
    def __init__(self, max_loss):
        self.max_loss = max_loss

    def on_train_batch_end(self, batch, logs=None):
        if logs["loss"] >= self.max_loss:
            self.model.stop_training = True
You can also guard against oversized models (for example, if you use Flatten() when switching from convolutional layers to fully connected ones):
from keras.utils.layer_utils import count_params

class EarlyStoppingByModelSize(keras.callbacks.Callback):
    def __init__(self, max_size):
        self.max_size = max_size

    def on_train_begin(self, logs=None):
        trainable_count = count_params(self.model.trainable_weights)
        if trainable_count > self.max_size:
            self.model.stop_training = True
Finally, add these callbacks to the list and pass them to the tuner when searching:
callbacks = []
callbacks.append(EarlyStoppingByLoss(900000))
callbacks.append(EarlyStoppingByModelSize(12000000))

tuner.search(x=x_train,
             y=y_train,
             epochs=epochs,
             validation_data=(x_test, y_test),
             callbacks=callbacks,
             verbose=1,
             batch_size=batch_size)
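For completeness, here is a minimal sketch of how the tuner itself might be set up; the original post does not show this part, so the choice of RandomSearch, the objective, the trial count, and the directory names below are illustrative assumptions only:
# Minimal sketch, assuming keras-tuner's RandomSearch; every argument
# except build_model is an illustrative placeholder.
import keras_tuner as kt

tuner = kt.RandomSearch(build_model,            # the try/except builder above
                        objective='val_loss',
                        max_trials=20,
                        directory='tuning',
                        project_name='guarded_search')
With this setup, any trial whose sampled hyperparameters cannot produce a valid model falls through to invalid_model(), is stopped after the first batch by EarlyStoppingByLoss, and is effectively discarded because its recorded loss is enormous.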
I would like to develop a DCGAN with a resolution of 256x256. To do so I need to use multiple GPUs, since a single one is not enough and training would probably take too much time.
I followed the procedure explained in the documentation at this link:
https://www.tensorflow.org/beta/guide/distribute_strategy
At the top of the script I used
strategy = tf.distribute.MirroredStrategy()
Then inside the Generator, Discriminator, and Loss functions I used
with strategy.scope():
The error I get is:
RuntimeError: Replica-local variables may only be assigned in a replica context.
strategy = tf.distribute.MirroredStrategy()

path = '/my/dataset/path/'
file_paths = [f for f in glob.glob(path + "**/*.jpg", recursive=True)]

tensor_data = np.zeros((len(file_paths), 256, 256, 3)).astype('float32')
for i in range(len(file_paths)):
    img_tensor = tf.image.decode_image(tf.io.read_file(file_paths[i]))
    tensor_data[i] = img_tensor

# Normalize the images to [-1, 1]
for i in range(tensor_data.shape[0]):
    tensor_data[i] = (tensor_data[i] - 127.5) / 127.5

BUFFER_SIZE = len(file_paths)
BATCH_SIZE = 256
train_dataset = tf.data.Dataset.from_tensor_slices(tensor_data).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
def make_generator_model():
    with strategy.scope():
        model = tf.keras.Sequential()
        model.add(layers.Dense(64*64*1536, use_bias=False, input_shape=(100,)))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Reshape((64, 64, 1536)))
        assert model.output_shape == (None, 64, 64, 1536)  # Note: None is the batch size

        model.add(layers.Conv2DTranspose(1536, (5, 5), strides=(1, 1), padding='same', use_bias=False))
        assert model.output_shape == (None, 64, 64, 1536)
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(768, (5, 5), strides=(2, 2), padding='same', use_bias=False))
        assert model.output_shape == (None, 128, 128, 768)
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

        model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
        assert model.output_shape == (None, 256, 256, 3)

        return model
generator = make_generator_model()

noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)

sample = generated_image[0, :, :, :]
sample = tf.cast(sample, tf.int32)
plt.imshow(sample, cmap=None)
def make_discriminator_model():
    with strategy.scope():
        model = tf.keras.Sequential()
        model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same', input_shape=[256, 256, 3]))
        model.add(layers.LeakyReLU())
        model.add(layers.Dropout(0.3))

        model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
        model.add(layers.LeakyReLU())
        model.add(layers.Dropout(0.3))

        model.add(layers.Flatten())
        model.add(layers.Dense(1))

        return model
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print(decision)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    with strategy.scope():
        real_loss = cross_entropy(tf.ones_like(real_output), real_output)
        fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
        total_loss = real_loss + fake_loss
        return total_loss

def generator_loss(fake_output):
    with strategy.scope():
        return cross_entropy(tf.ones_like(fake_output), fake_output)
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
checkpoint_dir = './training_checkpoints/'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
EPOCHS = 2000
noise_dim = 100
num_examples_to_generate = 16
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()

        for image_batch in dataset:
            train_step(image_batch)

        # Produce images for the GIF as we go
        display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)

        # Save the model every 50 epochs
        os.makedirs(os.path.dirname(checkpoint_prefix), exist_ok=True)
        if (epoch + 1) % 50 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    # Generate after the final epoch
    display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)
def generate_and_save_images(model, epoch, test_input):
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)

    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(8, 8, i + 1)
        sample = predictions[i, :, :, :] * 127.5 + 127.5
        sample = tf.cast(sample, tf.int32)
        plt.imshow(sample, cmap=None)
        plt.axis('off')

    filename = './screens/eye-256x256/1/image_at_epoch_{:04d}.png'
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    if (epoch + 1) % 10 == 0:
        plt.savefig(filename.format(epoch))
    plt.show()
%%time
train(train_dataset, EPOCHS)
The error is the following:
Executing op ExperimentalRebatchDataset in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op ExperimentalAutoShardDataset in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op OptimizeDataset in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op ModelDataset in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op MultiDeviceIterator in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op MultiDeviceIteratorInit in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op MultiDeviceIteratorToStringHandle in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op GeneratorDataset in device /job:localhost/replica:0/task:0/device:GPU:0
Executing op GeneratorDataset in device /job:localhost/replica:0/task:0/device:GPU:1
Executing op PrefetchDataset in device /job:localhost/replica:0/task:0/device:GPU:0
Executing op AnonymousIteratorV2 in device /job:localhost/replica:0/task:0/device:GPU:0
Executing op MakeIterator in device /job:localhost/replica:0/task:0/device:GPU:0
Executing op PrefetchDataset in device /job:localhost/replica:0/task:0/device:GPU:1
Executing op AnonymousIteratorV2 in device /job:localhost/replica:0/task:0/device:GPU:1
Executing op MakeIterator in device /job:localhost/replica:0/task:0/device:GPU:1
Executing op IteratorGetNextSync in device /job:localhost/replica:0/task:0/device:GPU:0
Executing op IteratorGetNextSync in device /job:localhost/replica:0/task:0/device:GPU:1
Executing op DestroyResourceOp in device /job:localhost/replica:0/task:0/device:CPU:0
Executing op DeleteIterator in device /job:localhost/replica:0/task:0/device:GPU:1
Executing op DeleteIterator in device /job:localhost/replica:0/task:0/device:GPU:0
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<timed exec> in <module>
<ipython-input-20-88a9879432c7> in train(dataset, epochs)
4
5 for image_batch in dataset:
----> 6 train_step(image_batch)
7
8 # Produce images for the GIF as we go
/usr/local/lib/python3.5/dist-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
414 # This is the first call of __call__, so we have to initialize.
415 initializer_map = {}
--> 416 self._initialize(args, kwds, add_initializers_to=initializer_map)
417 if self._created_variables:
418 try:
/usr/local/lib/python3.5/dist-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
357 self._concrete_stateful_fn = (
358 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 359 *args, **kwds))
360
361 def invalid_creator_scope(*unused_args, **unused_kwds):
/usr/local/lib/python3.5/dist-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1358 if self.input_signature:
1359 args, kwargs = None, None
-> 1360 graph_function, _, _ = self._maybe_define_function(args, kwargs)
1361 return graph_function
1362
/usr/local/lib/python3.5/dist-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
1646 graph_function = self._function_cache.primary.get(cache_key, None)
1647 if graph_function is None:
-> 1648 graph_function = self._create_graph_function(args, kwargs)
1649 self._function_cache.primary[cache_key] = graph_function
1650 return graph_function, args, kwargs
/usr/local/lib/python3.5/dist-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
1539 arg_names=arg_names,
1540 override_flat_arg_shapes=override_flat_arg_shapes,
-> 1541 capture_by_value=self._capture_by_value),
1542 self._function_attributes)
1543
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
714 converted_func)
715
--> 716 func_outputs = python_func(*func_args, **func_kwargs)
717
718 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/usr/local/lib/python3.5/dist-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
307 # __wrapped__ allows AutoGraph to swap in a converted function. We give
308 # the function a weak reference to itself to avoid a reference cycle.
--> 309 return weak_wrapped_fn().__wrapped__(*args, **kwds)
310 weak_wrapped_fn = weakref.ref(wrapped_fn)
311
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
704 except Exception as e: # pylint:disable=broad-except
705 if hasattr(e, "ag_error_metadata"):
--> 706 raise e.ag_error_metadata.to_exception(type(e))
707 else:
708 raise
RuntimeError: in converted code:
<ipython-input-19-d2ffe8a85706>:9 train_step *
generated_images = generator(noise, training=True)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/base_layer.py:667 __call__
outputs = call_fn(inputs, *args, **kwargs)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/sequential.py:248 call
return super(Sequential, self).call(inputs, training=training, mask=mask)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/network.py:753 call
return self._run_internal_graph(inputs, training=training, mask=mask)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/network.py:895 _run_internal_graph
output_tensors = layer(computed_tensors, **kwargs)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/base_layer.py:667 __call__
outputs = call_fn(inputs, *args, **kwargs)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/layers/normalization.py:782 call
self.add_update(mean_update)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/util/deprecation.py:507 new_func
return func(*args, **kwargs)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/engine/base_layer.py:1095 add_update
update()
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/layers/normalization.py:775 mean_update
return tf_utils.smart_cond(training, true_branch, false_branch)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/utils/tf_utils.py:58 smart_cond
pred, true_fn=true_fn, false_fn=false_fn, name=name)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/smart_cond.py:54 smart_cond
return true_fn()
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/layers/normalization.py:773 <lambda>
true_branch = lambda: _do_update(self.moving_mean, new_mean)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/layers/normalization.py:769 _do_update
inputs_size)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/keras/layers/normalization.py:458 _assign_moving_average
return state_ops.assign_sub(variable, update_delta, name=scope)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/state_ops.py:164 assign_sub
return ref.assign_sub(value)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/distribute/values.py:1394 assign_sub
_assert_replica_context(self._distribute_strategy)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/distribute/values.py:1381 _assert_replica_context
"Replica-local variables may only be assigned in a replica context.")
RuntimeError: Replica-local variables may only be assigned in a replica context.
You need to distribute your dataset also; for details please refer to this URL:
- https://www.tensorflow.org/beta/tutorials/distribute/training_loops
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
Every part of your model must be created under the strategy scope, including the optimizer:
with strategy.scope():
    optimizer = tf.keras.optimizers.Adam()
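Putting it together, here is a minimal sketch of the distributed training loop from the linked tutorial, adapted to this code. Note that experimental_run_v2 is the TF 2.0 beta API (renamed strategy.run in later releases), and train_step refers to the per-replica step defined in the question:
# Minimal sketch of a distributed custom training loop; assumes the
# TF 2.0 beta API, where strategy.run was still experimental_run_v2.
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)

@tf.function
def distributed_train_step(images):
    # Run one train_step on every replica; each replica receives its
    # own shard of the global batch.
    strategy.experimental_run_v2(train_step, args=(images,))

for epoch in range(EPOCHS):
    for image_batch in train_dist_dataset:
        distributed_train_step(image_batch)
Iterating over the distributed dataset and invoking train_step through the strategy gives each replica a proper replica context, which is exactly what the error is complaining about: the BatchNormalization moving averages are replica-local variables, and per the traceback they can only be assigned when the step runs inside such a context.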