load_model error: ValueError: Improperly formatted model config when using tensorflow.keras

I can load the model with load_model("model.h5") in Colab and model.predict works. But when I download the h5 file and call load_model locally, it raises "ValueError: Improperly formatted model config."
This is the model:
base_model=MobileNet(weights='imagenet',include_top=False) #imports the mobilenet model and discards the last 1000 neuron layer.
x=base_model.output
x=GlobalAveragePooling2D()(x)
x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
x=Dense(1024,activation='relu')(x) #dense layer 2
x=Dense(512,activation='relu')(x) #dense layer 3
preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
Using transfer learning:
model=Model(inputs=base_model.input,outputs=preds)
for layer in model.layers[:20]:
    layer.trainable = False
for layer in model.layers[20:]:
    layer.trainable = True
Then I trained it:
train_generator = train_datagen.flow_from_directory(
    '/content/chest_xray/train/',
    target_size=(224, 224),
    color_mode='rgb',
    batch_size=32,
    class_mode='categorical',
    shuffle=True)
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
# Adam optimizer
# loss function will be categorical cross entropy
# evaluation metric will be accuracy
step_size_train=train_generator.n//train_generator.batch_size
model.fit_generator(generator=train_generator,
                    steps_per_epoch=step_size_train,
                    epochs=5)
The model was saved:
model.save('chest-xray-pneumonia.h5')
Prediction works in Colab:
ill_path = "/content/chest_xray/train/PNEUMONIA/"
good_path = "/content/chest_xray/train/NORMAL/"
ill_pic = ill_path + os.listdir(ill_path)[1]
good_pic = good_path + os.listdir(good_path)[1]
print(get_rez(ill_pic))
print(get_rez(good_pic))
But running locally in a Flask app (python main.py), it doesn't work:
from flask import render_template, jsonify, Flask, redirect, url_for, request
from app import app
import random
import os
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions
import numpy as np
import ipdb
weightsPath = app.config['UPLOAD_FOLDER']
I get the error on the next line: ValueError: Improperly formatted model config.
new_model = load_model(os.path.join(app.config['UPLOAD_FOLDER'],"chest-xray-pneumonia.h5"))
def upload_file():
    if request.method == 'POST':
        f = request.files['file']
        path = os.path.join(app.config['UPLOAD_FOLDER'], f.filename)
        #f.save(os.path.join(app.config['UPLOAD_FOLDER'], f.filename))
        ill_pic = os.path.join(app.config['UPLOAD_FOLDER'], f.filename)
        print(get_rez(ill_pic))
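This error frequently comes down to a TensorFlow/Keras version mismatch: the H5 model config written by the (newer) Colab TensorFlow can look "improperly formatted" to an older local install. A minimal sanity check (a sketch, assuming the version gap is the culprit rather than a corrupted file) is to compare versions in both environments before calling load_model:
import tensorflow as tf

# Run this both in Colab and locally; the two outputs should match.
print("TensorFlow:", tf.__version__)
print("Bundled Keras:", tf.keras.__version__)

# If the local version is older, upgrading it (pip install --upgrade tensorflow)
# or re-saving the model from a runtime pinned to the local version
# usually lets load_model parse the saved config.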

Related

AttributeError: Can't get attribute 'create_model' on <module '__main__'>

I have created a neural network model and an ensemble voting model that combines the neural network with a random forest and XGBoost. I saved the model and tried to load it in another Jupyter notebook, but I get this error: AttributeError: Can't get attribute 'create_model' on <module '__main__'>
Here is the code that creates the models in the first notebook:
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import numpy
# Function to create model, required for KerasClassifier
def create_model(input_shape=66):
    #x_shape= data_x.shape
    #input_dim=x_shape[1]
    # create model
    model = Sequential()
    model.add(Dense(12, input_dim=66, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

seed = 7
numpy.random.seed(seed)

Kc_model = KerasClassifier(
    create_model,    # Pass in function
    input_shape=66,  # Pass in the dimensions to above function
    epochs=100,
    batch_size=32,
    verbose=False)
Kc_model._estimator_type = "classifier"
Kc_model.fit(x_train, y_train, epochs=100, batch_size=10)
rf = RandomForestClassifier(max_depth=15, random_state=0)
rf.fit(x_train,y_train)
rf_y_pred = rf.predict(x_test)
#Model Score
print("The accuracy score for Random Forest Classifier is")
print("Accuracy:{}%".format(round(metrics.accuracy_score(y_test, rf_y_pred)*100)))
print("Training:{}%".format(round(rf.score(x_train, y_train)*100)))
print("Test set: {}%".format(round(rf.score(x_test, y_test)*100)))
xgboost_model = XGBClassifier()
xgboost_model.fit(x_train, y_train)
xgboost_y_pred = xgboost_model.predict(x_test)
print("The accuracy score for Voting XGB Classifier is")
print("Accuracy:{}%".format(round(metrics.accuracy_score(y_test, xgboost_y_pred)*100)))
print("Training:{}%".format(round(xgboost_model.score(x_train, y_train)*100)))
print("Test set: {}%".format(round(xgboost_model.score(x_test, y_test)*100)))
from keras.wrappers.scikit_learn import KerasClassifier
import scikeras
from tensorflow import keras
voting = VotingClassifier(
    estimators=[('rf', rf), ('xgboost_model', xgboost_model), ('Kc_model', Kc_model)],
    voting='soft')
#reshaping=y_test.reshape(2712,1)
voting_model =voting.fit(x_train, y_train)
voting_pred = voting_model.predict(x_test)
#Model Score
print("The accuracy score for Voting Classifier is")
print("Training:{}%".format(round(voting_model.score(x_train, y_train)*100)))
print("Test set: {}%".format(round(voting_model.score(x_test, y_test)*100)))
import pickle
# save
with open('voting_model.pkl', 'wb') as f:
    pickle.dump(Kc_model, f)
In the second notebook, where I try to load the model, I get the error shown below:
import pickle
import pandas as pd
with open('voting_model.pkl', 'rb') as f:
    Kc_model = pickle.load(f)
The reason this happens is that the keras.wrappers.scikit_learn.KerasClassifier wrapper cannot be pickled. The model building function is not saved. Instead, you should pickle the fitted model:
import pickle
# save
with open('voting_model.pkl', 'wb') as f:
    pickle.dump(Kc_model.model, f)
Now, you can load your model and use it as you wish.
with open('voting_model.pkl', 'rb') as f:
    model = pickle.load(f)

# Predict something.
model.predict(X_test)
However, if you need a KerasClassifier instance after loading, you should re-wrap it. You also need to save the classes_ attribute. The build function then simply returns the loaded pickle:
# Save this as well.
with open('voting_model_classes.pkl', 'wb') as f:
    pickle.dump(Kc_model.classes_, f)

import pickle
from keras.wrappers.scikit_learn import KerasClassifier

def load_model():
    with open('voting_model.pkl', 'rb') as f:
        return pickle.load(f)

def load_classes():
    with open('voting_model_classes.pkl', 'rb') as f:
        return pickle.load(f)

Kc_model = KerasClassifier(
    load_model,
    epochs=100,
    batch_size=32,
    verbose=False)
Kc_model._estimator_type = "classifier"
# We need to manually call it because it will only be called once the classifier is re-fitted.
Kc_model.model = load_model()
Kc_model.classes_ = load_classes()
# Now you can use Kc_model as a KerasClassifier.
The error is expected: the model building function gets pickled by name, and that name doesn't exist in your new notebook.
You could try SciKeras which has an initialize method (docs) which you can call to restore stuff like classes_ if you choose to serialize your Keras model using SavedModel directly (SciKeras's KerasClassifier will gladly accept a model instance).
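As a rough sketch of that SciKeras route (assumptions: scikeras is installed, the underlying Keras model was saved separately to a hypothetical path "kc_model", and x_train/y_train are available in the second notebook), the wrapper accepts an already-built model instance and initialize restores attributes such as classes_ without re-fitting:
import tensorflow as tf
from scikeras.wrappers import KerasClassifier

# Load the Keras model saved earlier (path is a hypothetical example).
restored = tf.keras.models.load_model("kc_model")

# Wrap the already-trained model; initialize() prepares the wrapper without training.
Kc_model = KerasClassifier(model=restored, epochs=100, batch_size=32, verbose=False)
Kc_model.initialize(x_train, y_train)

print(Kc_model.classes_)        # restored class labels
print(Kc_model.predict(x_test[:5]))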

Keras and tensorflow conflict when transfer learning on MobileNetV3

I'm trying to do transfer learning with MobileNetV3 in Keras but I'm having some issues.
from keras.models import Model
from keras.layers import GlobalMaxPooling2D, Dense, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications import MobileNetV3Small
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
pretrained_model = MobileNetV3Small(input_shape=(224, 224, 3),
                                    weights="imagenet",
                                    include_top=False)

# freeze all layers except the last one
for layer in pretrained_model.layers:
    layer.trainable = False
pretrained_model.layers[-1].trainable = True
# combine the model with some extra layers for classification
last_output = pretrained_model.layers[-1].output
x = GlobalMaxPooling2D()(last_output)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(pretrained_model.input, x)
I get this error when I try to make the Dense layer:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
but it's fixed by adding the following code snippet:
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
When I include the code fix above, I get this error when I call model.fit():
FailedPreconditionError: 2 root error(s) found.
(0) Failed precondition: Could not find variable Conv_1_2/kernel. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Resource localhost/Conv_1_2/kernel/N10tensorflow3VarE does not exist.
[[{{node Conv_1_2/Conv2D/ReadVariableOp}}]]
[[_arg_dense_12_target_0_1/_100]]
(1) Failed precondition: Could not find variable Conv_1_2/kernel. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Resource localhost/Conv_1_2/kernel/N10tensorflow3VarE does not exist.
[[{{node Conv_1_2/Conv2D/ReadVariableOp}}]]
0 successful operations.
0 derived errors ignored.
How can I fix these issues and train the model?
From comments
Don't mix tf.keras and standalone keras. They are not compatible. Only use one of them (paraphrased from Frightera)
Working code is shown below:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalMaxPooling2D, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications import MobileNetV3Small
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
pretrained_model = MobileNetV3Small(input_shape=(224, 224, 3),
                                    weights="imagenet",
                                    include_top=False)

# freeze all layers except the last one
for layer in pretrained_model.layers:
    layer.trainable = False
pretrained_model.layers[-1].trainable = True
# combine the model with some extra layers for classification
last_output = pretrained_model.layers[-1].output
x = GlobalMaxPooling2D()(last_output)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(pretrained_model.input, x)
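The answer stops at building the model; a minimal follow-up (a sketch, where train_ds and val_ds are hypothetical placeholders for your own data pipeline) compiles and trains it with the Adam optimizer and ModelCheckpoint callback already imported above:
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss='binary_crossentropy',   # matches the single sigmoid output
              metrics=['accuracy'])

checkpoint = ModelCheckpoint('mobilenetv3_best.h5', monitor='val_loss', save_best_only=True)

model.fit(train_ds, validation_data=val_ds, epochs=10, callbacks=[checkpoint])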

Issue retrieving error when adding a classifier to a MobileNet model

I have the following code; I get an error when I try to add my own classifier.
import keras
from keras import layers,Model
from keras.layers import Input,GlobalAveragePooling2D,Flatten,Dense
MobileNetV2_model = tf.keras.applications.MobileNetV2(input_shape=None, alpha=1.0, include_top=False,
                                                      weights='imagenet')
#MobileNetV2_model.summary()
x= MobileNetV2_model.output
x = layers.GlobalAveragePooling2D()(x)
final_output=layers.Dense(2, activation='sigmoid')(x)
model = keras.Model(inputs=MobileNetV2.input, outputs = final_output)
model.compile(optimizer="adam", loss='BinaryCrossentropy', metrics=['accuracy'],loss_weights=0.1)
Error
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
You should never mix keras and tf.keras. You can refer to the working code shown below:
import tensorflow as tf
from tensorflow.keras import layers, Model
MobileNetV2_model= tf.keras.applications.MobileNetV2(input_shape=(224,224,3), alpha=1.0, include_top=False, weights='imagenet')
#MobileNetV2_model.summary()
x= MobileNetV2_model.output
x = layers.GlobalAveragePooling2D()(x)
final_output=layers.Dense(2, activation='sigmoid')(x)
model = Model(inputs=MobileNetV2_model.input, outputs = final_output)
model.compile(optimizer="adam", loss='BinaryCrossentropy', metrics=['accuracy'],loss_weights=0.1)

Input 0 of layer fc1 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 32768)

I'm implementing SRGAN (and am not very experienced in this field), which uses a pre-trained VGG19 model to extract features. The following code was working fine on Keras 2.1.2 and TF 1.15.0 until yesterday, when it started throwing "AttributeError: module 'keras.utils.generic_utils' has no attribute 'populate_dict_with_module_objects'". So I updated Keras to 2.4.3 and TF to 2.5.0, but now it shows "Input 0 of layer fc1 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 32768)" on the following line:
features = vgg(input_layer)
But here the input has to be (256,256,3).
I had downgraded the Keras and TF versions to the ones mentioned above to get rid of this error in the first place, and it was working well until yesterday.
Changing the input shape to (224,224,3) does not work. Any help in solving this error would be much appreciated.
import glob
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input
from keras.applications.vgg19 import VGG19
from keras.callbacks import TensorBoard
from keras.layers import BatchNormalization, Activation, LeakyReLU, Add, Dense,Flatten
from keras.layers.convolutional import Conv2D, UpSampling2D
from keras.models import Model
from keras.optimizers import Adam
from scipy.misc import imread, imresize
from PIL import Image
def build_vgg():
    input_shape = (256, 256, 3)
    vgg = VGG19(weights="imagenet")
    vgg.outputs = [vgg.layers[9].output]
    input_layer = Input(shape=input_shape)
    features = vgg(input_layer)
    model = Model(inputs=[input_layer], outputs=[features])
    return model
vgg = build_vgg()
vgg.trainable = False
vgg.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build and compile the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build the generator network
generator = build_generator()
The full error message is the one quoted above. I'm using Google Colab.
Importing keras from tensorflow and setting include_top=False in
vgg = VGG19(weights="imagenet",include_top=False)
seems to work.
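Put together, a corrected build_vgg (a sketch under the assumption that the intent is still to tap the output of layer index 9, as vgg.layers[9] did above, while importing everything from tensorflow.keras) could look like:
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.models import Model

def build_vgg():
    input_shape = (256, 256, 3)
    # include_top=False drops the fixed-size fc layers, so a 256x256 input is accepted
    vgg = VGG19(weights="imagenet", include_top=False, input_shape=input_shape)
    features = vgg.get_layer(index=9).output
    return Model(inputs=vgg.input, outputs=features)

vgg = build_vgg()
vgg.trainable = False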

Tensorflow-Keras reproducibility problem on Google Colab

I have a simple code to run on Google Colab (I use CPU mode):
import numpy as np
import pandas as pd
## LOAD DATASET
datatrain = pd.read_csv("gdrive/My Drive/iris_train.csv").values
xtrain = datatrain[:,:-1]
ytrain = datatrain[:,-1]
datatest = pd.read_csv("gdrive/My Drive/iris_test.csv").values
xtest = datatest[:,:-1]
ytest = datatest[:,-1]
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.utils import to_categorical
## SET ALL SEED
import os
os.environ['PYTHONHASHSEED']=str(66)
import random
random.seed(66)
np.random.seed(66)
tf.set_random_seed(66)
from tensorflow.keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
## MAIN PROGRAM
ycat = to_categorical(ytrain)
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(xtrain, ycat, epochs=15, batch_size=32)
#get prediction
classes = model.predict_classes(xtest)
#get accuration
accuration = np.sum(classes == ytest)/len(ytest) * 100
I have read the setup for creating reproducible code here: Reproducible results using Keras with TensorFlow backend, and I put all the code in the same cell. But the result (e.g. the loss) is different every time I run that cell (using Shift+Enter).
In my case, the result from the code above is reproducible only if:
I run it using "Runtime" > "Restart and run all", or
I put the code in a single file and run it from the command line (python3 file.py).
Is there something I'm missing to make the result reproducible without restarting the runtime?
You should also fix the seed for kernel_initializer in your Dense layers. So, your model will be like:
model = tf.keras.Sequential()
model.add(Dense(10, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66), input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66)))
model.add(Activation("softmax"))
I tried most of the solutions on the web and only the following code worked for me:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# Enable deterministic ops on GPUs (from "TensorFlow Determinism")
os.environ["TF_DETERMINISTIC_OPS"] = "1"
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
Note that you should call this code before every run (at least in my case).
If you want to run your code on the CPU:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# Hide the GPUs so everything runs on the CPU
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
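On newer TensorFlow releases (an aside, assuming TF 2.7+ for the seed helper and TF 2.8+ for op determinism, which is newer than what the question used), the same setup collapses into two calls:
import tensorflow as tf

tf.keras.utils.set_random_seed(0)               # seeds Python, NumPy and TensorFlow in one call
tf.config.experimental.enable_op_determinism()  # make GPU ops deterministic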
I've tried to get TensorFlow 2.0 working reproducibly using Keras and Google Colab (CPU), with a version of the Iris dataset processing similar to that described above by @malioboro. This seems to work - might be useful:
# Install TensorFlow
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
# Setup repro section from Keras FAQ with TF1 to TF2 adjustments
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
                                        inter_op_parallelism_threads=1)
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.compat.v1.set_random_seed(1234)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# Rest of code follows ...
# Some adopted from: https://janakiev.com/notebooks/keras-iris/
# Some adopted from the question.
#
# Load Data
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
iris = load_iris()
X = iris['data']
y = iris['target']
names = iris['target_names']
feature_names = iris['feature_names']
# One hot encoding
enc = OneHotEncoder()
Y = enc.fit_transform(y[:, np.newaxis]).toarray()
# Scale data to have mean 0 and variance 1
# which is importance for convergence of the neural network
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data set into training and testing
X_train, X_test, Y_train, Y_test = train_test_split(
    X_scaled, Y, test_size=0.5, random_state=2)
n_features = X.shape[1]
n_classes = Y.shape[1]
## MAIN PROGRAM
from tensorflow.keras.layers import Dense, Activation
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(X_train, Y_train, epochs=20, batch_size=32)
#get prediction
classes = model.predict_classes(X_test)
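One caveat about the last line: Sequential.predict_classes was deprecated and later removed from TensorFlow, so on recent versions the equivalent (a small sketch) is an argmax over predict:
# Equivalent of predict_classes on TF versions where it no longer exists
classes = np.argmax(model.predict(X_test), axis=-1)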