How to save a VGG model in Keras - TensorFlow

I have the following model:
# load VGG16 as convolutional base
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))

checkpoint_filepath = "/content/gdrive/MyDrive/Colab Notebooks/Deep_Learning/models/Benign_Melign_VGG_noAug_final.h5"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_loss',
    mode='auto',
    save_best_only=True)

# add custom fully-connected network on top of the already-trained base network
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))

# freeze convolutional base
conv_base.trainable = False

model.compile(loss="binary_crossentropy",
              optimizer=optimizers.Adam(lr=1e-3),  # lr = 0.0001
              metrics=METRICS)

# train fully-connected added part
history = model.fit(train_generat.flow(train_dataset_split,
                                       train_labels_split,
                                       batch_size=BATCH_SIZE,
                                       shuffle=False),
                    steps_per_epoch=len(train_dataset_split) // BATCH_SIZE,
                    epochs=100,
                    validation_data=valid_generat.flow(valid_dataset_split,
                                                       valid_labels_split,
                                                       batch_size=BATCH_SIZE,
                                                       shuffle=False),
                    validation_steps=len(valid_labels_split) // BATCH_SIZE,
                    callbacks=[model_checkpoint_callback])

model.summary()
model.save('Benign_Melign_VGG_noAug_final.h5')
The error is:

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py in get_config(self)
    719       raise NotImplementedError('Layer %s has arguments in `__init__` and '
    720                                 'therefore must override `get_config`.' %
--> 721                                 self.__class__.__name__)
    722     return config
    723

NotImplementedError: Layer ModuleWrapper has arguments in `__init__` and therefore must override `get_config`.
There is a problem in the last line, when I want to save the model; I also need to load it back later. I need to save and load the whole model, not just the weights. Can you help me with this?
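This error usually shows up when layers from the standalone keras package are mixed with tf.keras: the foreign layers get wrapped as ModuleWrapper objects, which do not implement get_config and therefore cannot be serialized. A minimal sketch of a fix, assuming that is the cause here: import every building block from tf.keras, checkpoint the full model rather than only the weights, and load it back with tf.keras.models.load_model.

import tensorflow as tf
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.applications import VGG16

# load VGG16 as convolutional base and freeze it
conv_base = VGG16(weights='imagenet', include_top=False,
                  input_shape=(150, 150, 3))
conv_base.trainable = False

model = models.Sequential([
    conv_base,
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adam(learning_rate=1e-3),
              metrics=['accuracy'])

# save_weights_only=False checkpoints the whole model (architecture,
# weights and optimizer state), not just the weights
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    'Benign_Melign_VGG_noAug_final.h5',
    save_weights_only=False,
    monitor='val_loss',
    mode='auto',
    save_best_only=True)

# ... train with model.fit(..., callbacks=[checkpoint]) ...

model.save('Benign_Melign_VGG_noAug_final.h5')   # whole model
restored = tf.keras.models.load_model('Benign_Melign_VGG_noAug_final.h5')

With every layer coming from tf.keras, get_config is implemented everywhere, and both model.save and the checkpoint callback can serialize the full model.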

Related

ValueError: Dimensions must be equal, but are 2 and 64 for '{{node binary_crossentropy/mul}}' with input shapes: [?,2], [?,64]

I'm trying binary classification of text with a bi-LSTM model, but I'm getting this error: ValueError: Dimensions must be equal, but are 2 and 64 for '{{node binary_crossentropy/mul}} = Mul[T=DT_FLOAT](binary_crossentropy/Cast, binary_crossentropy/Log)' with input shapes: [?,2], [?,64].
I am a beginner; please suggest a solution.
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import pad_sequences

text = df['text']
label = df['label']
# X: tokenized/encoded sequences (the tokenization step is omitted in the question)
X = pad_sequences(X, maxlen=max_len, padding=pad_type, truncating=trunc_type)
Y = pd.get_dummies(label).values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.20)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

# model creation
model = tf.keras.Sequential([
    # add an embedding layer
    tf.keras.layers.Embedding(word_count, 16, input_length=max_len),
    tf.keras.layers.Dropout(0.2),
    # add a bi-lstm layer
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(2, return_sequences=True)),
    # add dense layers
    tf.keras.layers.Dense(32, activation=tf.keras.activations.relu),
    tf.keras.layers.Dense(32, activation=tf.keras.activations.relu),
    tf.keras.layers.Dense(32, activation=tf.keras.activations.relu),
    tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax),
    # add the prediction layer
    tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid),
])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model.summary()
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=10, batch_size=batch_size, callbacks=[callback_func], verbose=1)
The labels come from pd.get_dummies, so each one is a one-hot vector of length 2; the output dimension of the prediction layer of the binary classifier should therefore be 2:

# add the prediction layer
tf.keras.layers.Dense(2, activation=tf.keras.activations.sigmoid)

Also, because the LSTM returns sequences (return_sequences=True), everything after it is applied per time step; add a Flatten layer to collapse the time dimension before the classifier:
# model creation
model = tf.keras.Sequential([
    # add an embedding layer
    tf.keras.layers.Embedding(word_count, 16, input_length=max_len),
    tf.keras.layers.Dropout(0.2),
    # add another bi-lstm layer
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(2, return_sequences=True)),
    # add flatten
    tf.keras.layers.Flatten(),  # <========================
    # add a dense layer
    tf.keras.layers.Dense(32, activation=tf.keras.activations.relu),
    tf.keras.layers.Dense(32, activation=tf.keras.activations.relu),
    tf.keras.layers.Dense(32, activation=tf.keras.activations.relu),
    tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax),
    # add the prediction layer
    tf.keras.layers.Dense(2, activation=tf.keras.activations.sigmoid),
])
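If one-hot labels aren't actually needed, another option (a sketch added here, not part of the answer above) is to keep the label as a single 0/1 column and predict one sigmoid unit, so labels and predictions are both [batch, 1], which is exactly what BinaryCrossentropy expects:

# keep integer 0/1 labels instead of pd.get_dummies one-hot vectors
# (assumes df['label'] already holds 0/1 values)
Y = label.values.astype('float32')

model = tf.keras.Sequential([
    tf.keras.layers.Embedding(word_count, 16, input_length=max_len),
    tf.keras.layers.Dropout(0.2),
    # return_sequences left at False: the LSTM emits one vector per sample
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(2)),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])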

TensorBoard graph messy (readvariableop_resource nodes) using tf.summary.trace_export

The code below builds two TensorBoard graphs for the same model. Using the Keras API produces a nice, simple graph, while using tf.summary.trace_export() adds, for each variable defined in the graph, a node in the outer scope with the suffix "readvariableop_resource", which makes the graph really messy as the number of parameters grows.
(In the example below there are 2 dense layers, each with 2 variables (kernel and bias), for a total of 4 variables (4 nodes).)
from datetime import datetime

import tensorflow as tf
from tensorflow import keras

# Define the model.
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])

(train_images, train_labels), _ = keras.datasets.mnist.load_data()
train_images = train_images / 255.0

# Define the Keras TensorBoard callback.
logdir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

# Train the model.
model.fit(
    train_images,
    train_labels,
    batch_size=64,
    epochs=1,
    callbacks=[tensorboard_callback])

@tf.function
def traceme(x):
    return model(x)

logdir = "logs/fit1/" + datetime.now().strftime("%Y%m%d-%H%M%S")
writer = tf.summary.create_file_writer(logdir)
tf.summary.trace_on(graph=True)
# Forward pass
traceme(tf.zeros((1, 28, 28, 1)))
with writer.as_default():
    tf.summary.trace_export(name="model_trace", step=0)
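Not a fix for trace_export itself, but if the goal is only a clean graph visualization: as the question notes, the Keras TensorBoard callback already writes the simple conceptual graph on its own (its write_graph argument defaults to True), so the manual trace can be skipped entirely. A minimal sketch:

# the callback logs the clean Keras graph to the "Graphs" tab by itself;
# no tf.summary.trace_on / trace_export is needed for this view
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir, write_graph=True)
model.fit(train_images, train_labels, batch_size=64, epochs=1,
          callbacks=[tensorboard_callback])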

CoreMLtools and Keras ValueError: need more than 1 value to unpack

I'm fine-tuning the Inception V3 model with Keras, in order to convert it with coremltools into a .mlmodel file.
However, when converting the model, coremltools throws the following error once the converter reaches the last layer of the model:

coremltools/models/neural_network.py", line 2501, in set_pre_processing_parameters
    channels, height, width = array_shape
ValueError: need more than 1 value to unpack
I used the code from the Keras documentation on applications found here: https://keras.io/applications/#fine-tune-inceptionv3-on-a-new-set-of-classes
And added a piece of code loading my dataset from the VGG example found here: https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
My final script looks like this, using TensorFlow as backend:
LOAD THE DATA
from keras.preprocessing.image import ImageDataGenerator

img_width, img_height = 299, 299
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 358
nb_validation_samples = 21
epochs = 1
batch_size = 15

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
TRAIN THE MODEL
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D

base_model = InceptionV3(weights='imagenet', include_top=False)

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(7, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)

for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

for i, layer in enumerate(base_model.layers):
    print(i, layer.name)

for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True

from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save('finetuned_inception.h5')
I'm writing here in response to @SwimBikeRun's request (as I need a bit more space).
I was converting YOLO to Keras and then Keras to CoreML. For the conversion I was using this script: https://github.com/qqwweee/keras-yolo3/blob/master/convert.py
In the conversion process the model was eventually created like this:

input_layer = Input(shape=(None, None, 3))
...
model = Model(inputs=input_layer, outputs=[all_layers[i] for i in out_index])

And those "None" inputs were what made the CoreML conversion fail. For CoreML the input size of your model must be known. So I changed it to this:

input_layer = Input(shape=(416, 416, 3))

Your input size will probably vary.
For your original question: maybe check your base_model.input size for the same problem.
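Applying that suggestion to the original question (a sketch, not verified against coremltools): give the Inception base a fully defined input shape, so the converter can unpack channels, height, and width from it instead of hitting an unknown shape:

# input_shape pins the model input to a concrete size instead of (None, None, 3)
base_model = InceptionV3(weights='imagenet', include_top=False,
                         input_shape=(299, 299, 3))
print(base_model.input_shape)   # (None, 299, 299, 3) -- only the batch dim is free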

Dimension mismatch error in Keras with Tensorflow Backend

I'm fine-tuning the last convolutional block of the VGG16 model alongside the top-level classifier: starting from a trained network, I re-train it on a new dataset using very small weight updates. I instantiated the convolutional base of VGG16 and loaded its weights, added the previously defined fully-connected model on top (loading its weights as well), and froze the layers of the VGG16 model up to the last convolutional block. Here is the code:
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense

# path to the model weights files.
weights_path = '../keras/examples/vgg16_weights.h5'
top_model_weights_path = 'modelvgg16.h5'
# dimensions of our images.
img_width, img_height = 32, 32
train_data_dir = 'data7/train'
validation_data_dir = 'data7/validation'
nb_train_samples = 1600
nb_validation_samples = 400
epochs = 100
batch_size = 16

# build the VGG16 network
model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
print('Model loaded.')

# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))

# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning
top_model.load_weights(top_model_weights_path)

# add the model on top of the convolutional base
model.add(top_model)

# set the first 25 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:15]:
    layer.trainable = False

# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

# fine-tune the model
model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    epochs=epochs,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples)
On executing the script I get:

ValueError: Dimension 0 in both shapes must be equal, but are 25088 and 512 for 'Assign_26' (op: 'Assign') with input shapes: [25088, 256], [512, 256].

The error pops up at this instruction:

top_model.load_weights(top_model_weights_path)

Kindly help with the modification of the code. Thanks in advance.
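For context on the numbers: with a 224x224 input, VGG16's convolutional output is 7x7x512, so Flatten feeds 25088 features to the first Dense layer, while the saved top-model weights expect 512, i.e. a 1x1x512 feature map, which is what a 32x32 input yields after VGG16's five pooling stages. A sketch of one way to make the shapes agree, assuming modelvgg16.h5 was trained on 32x32 inputs as img_width, img_height suggest:

# rebuild the base at the size the top model was (assumed to be) trained on
base = applications.VGG16(weights='imagenet', include_top=False,
                          input_shape=(32, 32, 3))

top_model = Sequential()
top_model.add(Flatten(input_shape=base.output_shape[1:]))  # 1*1*512 = 512
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
top_model.load_weights(top_model_weights_path)  # [512, 256] now matches

# VGG16 is a functional Model with no .add(); wrap both parts in a Sequential
model = Sequential()
model.add(base)
model.add(top_model)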

Problems with input shapes when fine-tuning VGG with Keras

I'm trying to fine-tune the last layer of the VGG-16. Here is the part of the code where I make the new model:
def train2false(model):
    for layer in model.layers:
        layer.trainable = False
    return model

def define_training_layers(model):
    model.layers = model.layers[0:21]
    model = train2false(model)
    last_layer = model.get_layer('fc7').output
    out = Dense(n_classes, activation='softmax', name='fc8')(last_layer)
    model = Model(input=model.input, output=out)
    return model

def compile_model(epochs, lrate, model):
    decay = lrate / epochs
    sgd = SGD(lr=lrate, momentum=0, decay=0.0002, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    print(model.summary())
    return model

def train_evaluate(model, X_train, y_train, X_test, y_test, epochs):
    model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=epochs, batch_size=32)
    # Final evaluation of the model
    scores = model.evaluate(X_test, y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))
    return model

X_train, X_test, labels_test, labels_train, n_classes = load_dataset()
image_input = Input(shape=(3, 224, 224))
vgg_model = VGGFace(input_tensor=image_input, include_top=True)
custom_vgg_model = define_training_layers(vgg_model)
custom_vgg_model = compile_model(epochs=50, lrate=0.001, model=custom_vgg_model)
custom_vgg_model = train_evaluate(custom_vgg_model, X_train=X_train, y_train=labels_train, X_test=X_test, y_test=labels_test, epochs=50)
I get the following error:

tensorflow.python.framework.errors_impl.InvalidArgumentError: Dimension 1 in both shapes must be equal, but are 1000 and 2622 for 'Assign_30' (op: 'Assign') with input shapes: [4096,1000], [4096,2622].
It works for me if I try to fine-tune the whole fully-connected part with include_top=False instead of just the softmax activation. Is there something I'm missing?
Solved! I had taken the pre-trained weights from https://github.com/rcmalli/keras-vggface/releases/download/v1.0/rcmalli_vggface_th_weights_th_ordering.h5, which has 2622 outputs, while my model had 1000 outputs. So just change the number of outputs of the last layer in VGG.py.
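A sketch of that idea as code (hypothetical, since the actual change was made inside keras-vggface's VGG.py): the architecture being instantiated must end in a 2622-unit layer so the downloaded weights can be assigned, and the task-specific head is attached afterwards:

# build the model with the 2622-way head the weight file expects
vgg_model = VGGFace(input_tensor=image_input, include_top=True)  # fc8: 2622 units
vgg_model = train2false(vgg_model)                               # freeze everything
# then replace fc8 with a head sized for the actual dataset
last = vgg_model.get_layer('fc7').output
out = Dense(n_classes, activation='softmax', name='fc8_new')(last)
custom_vgg_model = Model(inputs=vgg_model.input, outputs=out)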