Keras ValueError trying to load model - tensorflow

I am using Anaconda Navigator, Jupyter to be precise.
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
>>> 1.14.0
This is my model:
def create_model():
    model = tf.keras.Sequential([
        keras.layers.Dense(86, activation='relu', kernel_regularizer=keras.regularizers.l2(0.0001), input_shape=(129,)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(142, activation='relu', kernel_regularizer=keras.regularizers.l2(0.0001)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(4, activation='softmax')
    ])
    return model
model = create_model()
# Display the model's architecture
model.summary()
After training, predicting, and evaluating my model, I decided to save it using
model.save('/Users/Jennifer/myproject/my_model.h5')
I checked the directory, and the folder contains the h5 file. Then I decided to load it using
new_model1 = tf.keras.models.load_model('/Users/Jennifer/myproject/my_model.h5')
I got an error:
ValueError: Unknown entries in loss dictionary: ['class_name', 'config']. Only expected following keys: ['dense_17']
Please help me. What should I do? I have spent almost the whole day trying to solve this issue. Thanks!

Here is a bit of a workaround that just loads the weights:
#!/usr/bin/env python3
from tensorflow import keras
import os

def create_model():
    model = keras.Sequential([
        keras.layers.Dense(86, activation='relu', kernel_regularizer=keras.regularizers.l2(0.0001), input_shape=(129,)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(142, activation='relu', kernel_regularizer=keras.regularizers.l2(0.0001)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(4, activation='softmax')
    ])
    return model

if os.path.exists("junk.h5"):
    model = create_model()
    model.load_weights("junk.h5")
else:
    model = create_model()
    model.compile(optimizer=keras.optimizers.Adam(0.0001),
                  loss=keras.losses.CategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.save("junk.h5")
Another workaround would be to save the model without the optimizer:
model.save("junk.h5", include_optimizer=False)
It looks like the loss function you're using creates a dictionary with invalid keys. This sounds like a bug in Keras/TensorFlow, which is probably why it worked on Colab: Colab was using a newer version.


Tensorboard callback doesn't work when calling

I am new to TensorFlow and Keras and have just started my deep learning journey. I installed TensorFlow 2.4.3 as well as Keras. While learning about TensorBoard, I created a model for the IMDB dataset as follows:
import tensorflow as tf
import keras
from tensorflow.keras import *
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
## model making
max_features = 2000
max_len = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
model = models.Sequential()
model.add(layers.Embedding(max_features, 128,
                           input_length=max_len,
                           name='embed'))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])
I used the TensorBoard callback here:
callbacks = [
    keras.callbacks.TensorBoard(
        log_dir='my_log_dir',
        histogram_freq=1,
        embeddings_freq=1,
    )
]
history = model.fit(x_train, y_train,
                    epochs=3,
                    batch_size=128,
                    validation_split=0.2,
                    callbacks=callbacks)
Then I got the following warning.
C:\Users\ktripat\Anaconda3\envs\tf2\lib\site-packages\keras\callbacks\tensorboard_v2.py:102: UserWarning: The TensorBoard callback does not support embeddings display when using TensorFlow 2.0. Embeddings-related arguments are ignored.
warnings.warn('The TensorBoard callback does not support.'
Does anyone have a solution? Thank you in advance!
You will need to follow this guide.
It describes how to save the weights of your embedding layer in a way that lets you visualize them in TensorBoard:
import os
import tensorflow as tf
from tensorboard.plugins import projector

# Set up a logs directory, so TensorBoard knows where to look for files.
log_dir = '/logs/imdb-example/'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# Save the labels separately in a line-by-line manner.
# `encoder` is the subword text encoder used in the guide's dataset.
with open(os.path.join(log_dir, 'metadata.tsv'), "w") as f:
    for subwords in encoder.subwords:
        f.write("{}\n".format(subwords))
    # Fill in the rest of the labels with "unknown".
    for unknown in range(1, encoder.vocab_size - len(encoder.subwords)):
        f.write("unknown #{}\n".format(unknown))

# Save the weights we want to analyze as a variable. Note that the first
# value represents any unknown word, which is not in the metadata, so
# we remove this value here.
weights = tf.Variable(model.layers[0].get_weights()[0][1:])

# Create a checkpoint from the embedding; the filename and key are the
# name of the tensor.
checkpoint = tf.train.Checkpoint(embedding=weights)
checkpoint.save(os.path.join(log_dir, "embedding.ckpt"))

# Set up the projector config.
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
# The name of the tensor will be suffixed by `/.ATTRIBUTES/VARIABLE_VALUE`.
embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
embedding.metadata_path = 'metadata.tsv'
projector.visualize_embeddings(log_dir, config)
If you want to visualize during training, you can run this code from a callback every X epochs, as sketched below.
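For example, a minimal sketch of such a callback (the class name and interval parameter are illustrative; it reuses the checkpoint-saving code from the snippet above):
import os
import tensorflow as tf
from tensorflow import keras

class EmbeddingCheckpoint(keras.callbacks.Callback):
    # Saves the embedding weights for the projector every `interval` epochs.
    def __init__(self, log_dir, interval=5):
        super().__init__()
        self.log_dir = log_dir
        self.interval = interval

    def on_epoch_end(self, epoch, logs=None):
        if (epoch + 1) % self.interval == 0:
            # same checkpoint-saving code as in the guide above
            weights = tf.Variable(self.model.layers[0].get_weights()[0][1:])
            checkpoint = tf.train.Checkpoint(embedding=weights)
            checkpoint.save(os.path.join(self.log_dir, 'embedding.ckpt'))
Pass an instance of it in the callbacks list of model.fit alongside the TensorBoard callback.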

Create keras model from another trained model

I am trying to create a new Keras model from another, trained Keras model.
Sample Code for model training referred from:
# TF version 2.2.0
from tensorflow.python.keras import models, layers
from tensorflow.python.keras.models import Sequential
from tensorflow.python import keras
import tensorflow as tf
from tensorflow.python.keras.layers import Dense
from tensorflow.keras.datasets import boston_housing

(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
model = Sequential()
model.add(Dense(2, activation='relu', input_shape=(13,)))
model.add(Dense(1, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=64, epochs=1, validation_split=0.2)
Saving the model as JSON and recreating it:
json_obj = model.to_json()
new_model = keras.models.model_from_json(json_obj)
But after creating new_model, the weights are different:
model.get_weights() != new_model.get_weights()
The same happens if I create new_model using from_config(). Shouldn't the weights be the same for both model and new_model, since I am creating new_model from model, or is my understanding wrong? Any suggestions are helpful.
to_json doesn't save the model's weights, only the architecture.
Check here: to_json method
I recommend using the save_model method instead.
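A minimal sketch of that full save/load round trip (the file name is illustrative):
import tensorflow as tf

# saves architecture, weights, and optimizer state together
model.save('full_model.h5')

# loading it back restores the weights exactly
new_model = tf.keras.models.load_model('full_model.h5')
for w1, w2 in zip(model.get_weights(), new_model.get_weights()):
    assert (w1 == w2).all()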
If you want to copy the model to another one directly, do the following (clone_model copies only the architecture and creates freshly initialized weights, which is why set_weights is needed):
new_model = tf.keras.models.clone_model(model)
new_model.set_weights(model.get_weights())

How to deploy cnn file

I have trained a model using this code...
https://github.com/shantanuo/pandas_examples/blob/master/tensorflow/simages_train_waiting.ipynb
My file is ready, but how do I deploy it?
https://s3.ap-south-1.amazonaws.com/studentimages162a/cnn.h5
I tried to use the hosted solution panini.ai, but it does not accept h5 files. I tried converting the model to CSV, but that did not work. I also tried to use Flask:
https://github.com/mtobeiyf/keras-flask-deploy-webapp
I got this error while trying to run the Docker container:
# docker run -v /tmp/:/tmp/ -p 5000:5000 keras_flask_app
Using TensorFlow backend.
Traceback (most recent call last):
  File "app.py", line 26, in <module>
    model = load_model(MODEL_PATH)
  File "/usr/local/lib/python2.7/site-packages/keras/engine/saving.py", line 419, in load_model
    model = _deserialize_model(f, custom_objects, compile)
  File "/usr/local/lib/python2.7/site-packages/keras/engine/saving.py", line 221, in _deserialize_model
    model_config = f['model_config']
  File "/usr/local/lib/python2.7/site-packages/keras/utils/io_utils.py", line 302, in __getitem__
    raise ValueError('Cannot create group in read only mode.')
ValueError: Cannot create group in read only mode.
In other words, how do I use the cnn.h5 file?
I am trying to use this code:
from keras.models import Sequential
from keras.layers import Dense, Activation

def build_model():
    model = Sequential()
    model.add(Dense(output_dim=64, input_dim=100))
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10))
    model.add(Activation("softmax"))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model

model2 = build_model()
model2.load_weights('cnn.h5')
And got the error:
ValueError: You are trying to load a weight file containing 4 layers into a model with 2 layers.
Concerning the first error, the problem is that the Flask app tries to load the complete model (i.e. with its configuration):
model = load_model(MODEL_PATH)
whereas after training you saved only the weights:
cnn.save_weights('cnn.h5')
Try to use cnn.save('cnn.h5') instead.
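That is, at the end of training (a minimal sketch):
# save the full model (architecture + weights), not just the weights,
# so that load_model() in the Flask app can reconstruct it
cnn.save('cnn.h5')
The app.py code can then keep its model = load_model(MODEL_PATH) line unchanged.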
In the second case, your model definition does not match the trained model; indeed, it is a completely different model with no convolutional layers at all. The corresponding model definition would be:
from keras.models import Sequential
from keras.layers import Activation, Conv2D, Dropout, MaxPooling2D

def build_model():
    # IMG_SIZE and NB_CHANNELS are defined in the training notebook
    model = Sequential()
    model.add(Conv2D(filters=32,
                     kernel_size=(2, 2),
                     strides=(1, 1),
                     padding='same',
                     input_shape=(IMG_SIZE, IMG_SIZE, NB_CHANNELS),
                     data_format='channels_last'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2),
                           strides=2))
    model.add(Dropout(0.4))
    model.add(Conv2D(filters=64,
                     kernel_size=(2, 2),
                     strides=(1, 1),
                     padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2),
                           strides=2))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model
You can load the model in the following way:
from tensorflow.keras.models import load_model
model = load_model('cnn.h5')
The contents of the h5 file can be inspected with h5py:
import h5py
import numpy as np
hf = h5py.File('cnn.h5', 'r')
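To see whether a given h5 file is a full model or weights only, a quick sketch of inspecting it (a full model saved with model.save() carries a 'model_config' attribute; a weights-only file does not):
print(list(hf.keys()))             # layer groups / datasets in the file
print('model_config' in hf.attrs)  # True for a full model, False for weights-only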
Your trained model and the model you are trying to load differ.
Replace
cnn = Sequential()
with
cnn = build_model()

Calling a Keras model on a TensorFlow tensor but keep weights

In the "Keras as a simplified interface to TensorFlow" tutorial, they describe how one can call a Keras model on a TensorFlow tensor.
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=784))
model.add(Dense(10, activation='softmax'))

# this works!
x = tf.placeholder(tf.float32, shape=(None, 784))
y = model(x)
They also say:
Note: by calling a Keras model, you are reusing both its architecture and its weights. When you are calling a model on a tensor, you are creating new TF ops on top of the input tensor, and these ops are reusing the TF Variable instances already present in the model.
I interpret this as meaning that the weights of the model will be the same in y as in model. However, it seems to me that the weights in the resulting TensorFlow node are reinitialized. A minimal example is below:
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense

# Create a model with weights initialized to 1
model = Sequential()
model.add(Dense(1, input_dim=1, kernel_initializer='ones',
                bias_initializer='zeros'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# Save the weights
model.save_weights('file')

# Create another identical model, except with weights initialized to 0
model2 = Sequential()
model2.add(Dense(1, input_dim=1, kernel_initializer='zeros',
                 bias_initializer='zeros'))
model2.compile(loss='binary_crossentropy', optimizer='adam',
               metrics=['accuracy'])

# Load the weights from the first model
model2.load_weights('file')

# Call the model on a TensorFlow tensor
v = tf.Variable([[1, ], ], dtype=tf.float32)
node = model2(v)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(node), model2.predict(np.array([[1, ], ])))
# Prints (array([[ 0.]], dtype=float32), array([[ 1.]], dtype=float32))
Why I want to do this:
I want to use a trained network in another minimization scheme, where the network "punishes" places in the search space that are not allowed. So if you have ideas that don't involve this specific approach, those are also very appreciated.
Finally found the answer. There are two problems in the example from the question.
1:
The first and most obvious problem was that I called the tf.global_variables_initializer() function, which re-initializes all variables in the session. Instead, I should have called tf.variables_initializer(var_list), where var_list is a list of the variables to initialize.
2:
The second problem was that Keras did not use the same session as the native TensorFlow objects. This meant that to be able to run the TensorFlow object model2(v) with my session sess, it needed to be reinitialized. Again, the "Keras as a simplified interface to TensorFlow" tutorial was able to help:
We should start by creating a TensorFlow session and registering it with Keras. This means that Keras will use the session we registered to initialize all variables that it creates internally.
import tensorflow as tf
sess = tf.Session()
from keras import backend as K
K.set_session(sess)
If we apply these changes to the example provided in my question, we get the following code, which does exactly what is expected of it.
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

sess = tf.Session()
# Register the session with Keras
K.set_session(sess)

model = Sequential()
model.add(Dense(1, input_dim=1, kernel_initializer='ones',
                bias_initializer='zeros'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.save_weights('test')

model2 = Sequential()
model2.add(Dense(1, input_dim=1, kernel_initializer='zeros',
                 bias_initializer='zeros'))
model2.compile(loss='binary_crossentropy', optimizer='adam',
               metrics=['accuracy'])
model2.load_weights('test')

v = tf.Variable([[1, ], ], dtype=tf.float32)
node = model2(v)
init = tf.variables_initializer([v, ])
sess.run(init)
print(sess.run(node), model2.predict(np.array([[1, ], ])))
# prints: (array([[ 1.]], dtype=float32), array([[ 1.]], dtype=float32))
Conclusion:
The lesson is that when mixing Tensorflow and Keras, make sure everything uses the same session.
Thanks for asking this question and answering it; it helped me! In addition to setting the same TF session in the Keras backend, it is also important to note that if you want to load a Keras model from a file, you need to run a global variable initializer op before you load the model.
import tensorflow as tf
from tensorflow.keras import models

sess = tf.Session()
# make sure Keras uses the same session as this code
tf.keras.backend.set_session(sess)

# Do this BEFORE loading a Keras model
init_op = tf.global_variables_initializer()
sess.run(init_op)
model = models.load_model('path/to/your/model.h5')

Keras error "You must feed a value for placeholder tensor 'bidirectional_1/keras_learning_phase' with dtype bool"

I get the following error for the code snippet below:
You must feed a value for placeholder tensor
'bidirectional_1/keras_learning_phase' with dtype bool
If I add the dropout layer model.add(Dropout(dropout)), it works. Does anyone know why? The backend is TensorFlow, Keras 2.0.1.
def prep_model1(embedding_layer1, embedding_layer2, dropout=0.5):
    model0 = Sequential()
    model0.add(embedding_layer1)
    model0.add(Bidirectional(LSTM(128, return_sequences=False, dropout=dropout)))

    model1 = Sequential()
    model1.add(embedding_layer2)
    model1.add(Bidirectional(LSTM(128, return_sequences=False, dropout=dropout)))

    model = Sequential()
    model.add(Merge([model0, model1], mode='concat', concat_axis=1))
    #model.add(Dropout(dropout))
    model.add(Dense(1, activation='sigmoid'))
    return model
Try importing K and setting the learning phase before building your model.
from keras import backend as K
K.set_learning_phase(1) #set learning phase
From this issue
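Alternatively, if you are evaluating tensors yourself with the TensorFlow backend rather than going through fit/predict, here is a sketch of feeding the learning-phase placeholder explicitly (assuming a single-input model; x_batch is an illustrative name for your input batch):
from keras import backend as K

sess = K.get_session()
# 0 = test phase (dropout off), 1 = train phase (dropout on)
output = sess.run(model.output,
                  feed_dict={model.input: x_batch,
                             K.learning_phase(): 0})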