keras tensorflow load_weights fails

I am using Keras 1.2 with the TensorFlow 1.0.0 backend.
I have a function that loads a pre-trained model from JSON and then loads its weights from an HDF5 file.
def load():
    model = model_from_json(open(model_path).read())
    model.load_weights(model_weights_path)
This function, or more precisely the call to load_weights, results in the following exception:
RuntimeError: The Session graph is empty. Add operations to the graph before calling run()
I wonder if that is due to these lines that I put at the beginning of my module to set the TensorFlow seed for reproducibility:
tf.set_random_seed(123) # To set Tensorflow seed
sess = tf.Session()
keras.backend.set_session(sess)
It seems Keras does not automatically associate the loaded model's graph with the session, so the weights fail to initialize.
Any explanation and workaround to avoid the exception?

I am using pretty much the same code as you, and it works for me.
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, GlobalAveragePooling2D
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.models import model_from_json
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.pooling import AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import ZeroPadding2D
from keras.engine.topology import Merge
from keras.layers import merge
from keras.optimizers import Adam
from keras import backend as K
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import ZeroPadding2D
import PIL
import inception
import tensorflow as tf
import keras
import glob
import pandas as pd
import pickle
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("model.h5")
print("Loaded model from disk")
model.summary()
model.compile(Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
score = model.predict(transfer_values_test)

Indeed, it seems that Keras doesn't respect the session set by set_session when loading models.
Try forcing Keras to use a particular session via TensorFlow's context manager:
def load():
    with sess.as_default():
        model = model_from_json(open(model_path).read())
        model.load_weights(model_weights_path)
If Keras still complains, predefine a graph (graph = tf.Graph()) and force model.load_weights to use it by introducing an additional with statement:
def load():
    with graph.as_default():
        with sess.as_default():
            model = model_from_json(open(model_path).read())
            model.load_weights(model_weights_path)
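Putting the pieces together, a minimal sketch (assuming Keras 1.2 with the TF 1.x backend, and that model_path and model_weights_path are defined elsewhere) creates the graph and session up front and reuses them when loading:

import tensorflow as tf
import keras
from keras.models import model_from_json

graph = tf.Graph()
with graph.as_default():
    tf.set_random_seed(123)  # the seed must be set inside the graph it applies to
    sess = tf.Session()
    keras.backend.set_session(sess)

def load():
    # build and restore the model inside the same graph/session pair
    with graph.as_default():
        with sess.as_default():
            model = model_from_json(open(model_path).read())
            model.load_weights(model_weights_path)
    return model

The important part is that the seed, the session, and every later Keras operation all refer to the same tf.Graph.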

Related

Lambda function in TensorFlow to modify the data inside the model

My goal is to change the data inside the model using a Lambda function.
The code fails at the last part, in model.predict.
Can someone help me fix this, or suggest a similar approach if you have one?
import glob
import tensorflow as tf
import tensorflow_hub as hub
from os.path import basename, join
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import Adam
from keras import layers
from sklearn.metrics import auc, average_precision_score
import numpy as np
import base64
import cv2
# this is the function that I want to call inside the model
# it takes the data, which is an image encoded as base64, and decodes it back into a numpy array
def base64_decoder(inputs):
    binary_data = base64.b64decode(inputs)
    img = cv2.imdecode(np.frombuffer(binary_data, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
    return img
# this is the model I'm using to test
model_handle = "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/feature_vector/2"
INPUT_SIZE = (331, 331, 3)
model = tf.keras.Sequential([
    tf.keras.layers.Lambda(base64_decoder),
    tf.keras.layers.InputLayer(input_shape=INPUT_SIZE),
    hub.KerasLayer(model_handle, trainable=True),
    tf.keras.layers.Dropout(rate=0.2),
    layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=[tf.keras.metrics.AUC(curve='PR')])
# now I will test the model
im = np.random.rand(331, 331, 3)
img = np.expand_dims(im, axis=0)
# encode the random image
_, buffer = cv2.imencode('.jpg', im)
jpg_as_text = base64.b64encode(buffer)
value = jpg_as_text.decode('utf-8')
#predict
print(model.predict(value))
Also, I may change the lambda function to a full custom function; what do you think?
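As a rough sketch of that custom-function idea (not part of the original code, and assuming the same (331, 331, 3) images), the cv2 decoder could be wrapped in tf.py_function inside a custom layer so that it runs on each element of a batch of base64 strings:

import base64
import cv2
import numpy as np
import tensorflow as tf

class Base64DecodeLayer(tf.keras.layers.Layer):
    # decodes a batch of base64-encoded JPEG strings into float32 image tensors
    def call(self, inputs):
        def _decode(b64_string):
            binary = base64.b64decode(b64_string.numpy())
            img = cv2.imdecode(np.frombuffer(binary, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
            return img.astype(np.float32)
        # run the Python decoder element-wise via tf.py_function
        images = tf.map_fn(
            lambda s: tf.py_function(_decode, [s], tf.float32),
            inputs,
            fn_output_signature=tf.float32)
        images.set_shape((None, 331, 331, 3))
        return images

With a layer like this, model.predict would then need a batch of strings, e.g. model.predict(tf.constant([value])), rather than a single Python string.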

Unable to Import EfficientNet in Colab

I am working in Colab to test some code. While importing the models, it gives the error No module named 'efficientnet'.
I am sharing the code and the error here.
# for accessing tabular data
import pandas as pd
import numpy as np
import os
os.chdir('/content/drive/My Drive/')
# adding classweight
from sklearn.utils import class_weight
# Evaluation Metric
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix, precision_score, recall_score
# for visualization
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
from prettytable import PrettyTable
# backend
import keras
from keras import backend as K
import tensorflow as tf
from keras.callbacks import Callback
# for transfer learning
from tensorflow.keras.applications import VGG16, VGG19
from tensorflow.keras.applications import DenseNet121
from tensorflow.keras.applications import ResNet50, ResNet152
from tensorflow.keras.applications import InceptionV3
from efficientnet.keras import EfficientNetB0, EfficientNetB3, EfficientNetB4
from keras.applications import Xception
# for model architecture
from keras.models import Sequential
from keras.layers import GlobalAveragePooling2D, Dropout, Dense, Conv2D, MaxPooling2D, Activation, Flatten
# for Tensorboard visualization
from keras.callbacks import TensorBoard
# for Data Augmentation
from keras.preprocessing.image import ImageDataGenerator
It should be,
from tensorflow.keras.applications import EfficientNetB0, EfficientNetB3, EfficientNetB4
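For example (a minimal sketch, assuming TensorFlow 2.3 or newer, where the EfficientNet models ship with tf.keras):

from tensorflow.keras.applications import EfficientNetB0

# build the backbone for transfer learning
base = EfficientNetB0(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
base.trainable = False

Alternatively, the original from efficientnet.keras import ... line should work if the third-party package is installed first (pip install efficientnet).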

keras.utils.layer_utils and keras.utils.generic_utils equivalent in tf.keras

So I've been following this guide (https://towardsdatascience.com/faster-r-cnn-object-detection-implemented-by-keras-for-custom-data-from-googles-open-images-125f62b9141a) while trying to implement my custom dataset for object detection with Faster R-CNN. In the source code that trains on the dataset, here is the list of all the Keras-related imports.
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.objectives import categorical_crossentropy
from keras.models import Model
from keras.utils import generic_utils
from keras.engine import Layer, InputSpec
from keras import initializers, regularizers
Because I use the TensorFlow-specific implementation of the Keras API, I converted all of the above keras modules to tensorflow.keras, and so on. There are some minor adjustments, like changing:
from keras.engine.topology import get_source_inputs
to
from tensorflow.keras.utils import get_source_inputs
I've found the tensorflow.keras equivalent for most modules, except for these two lines:
from keras.utils import layer_utils
from keras.utils import generic_utils
I can't even find keras.utils documentation for those two modules at https://keras.io/api/utils/.
How do I convert those two modules to tensorflow.keras? I'm currently running TensorFlow 2.1.0 locally. And if there's no way to import these modules, is there any other solution? I'm planning to train my data on my GPU locally.
In TensorFlow 2.x, you can use:
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import generic_utils
With the current version of TensorFlow (2.8.2), the following code is working fine for me, so the above imports can be replaced with this:
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from tensorflow.keras.utils import get_source_inputs
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.models import Model
from tensorflow.python.keras.utils import generic_utils
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.keras import initializers, regularizers
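Note that layer_utils and generic_utils come from the private tensorflow.python namespace, so their location can change between releases. A quick sanity check that the two imports resolve (a minimal sketch, assuming TF 2.x):

from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils

# Progbar is a typical reason custom training loops import generic_utils
progbar = generic_utils.Progbar(10)
for step in range(10):
    progbar.update(step + 1)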

Getting ValueError and TypeError while training a model using ResNet50

I am working on medical image classification using the ResNet50 model. Whenever I try to flatten the layer I get this error.
ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
My code is as below
from PIL import Image
import numpy as np
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
from tensorflow.keras import backend as K
import gc
from functools import partial
from tqdm import tqdm
from sklearn import metrics
from collections import Counter
import json
import itertools
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D,GlobalAveragePooling2D
from keras.layers import Input, Lambda, Dense, Flatten
from keras.preprocessing import image
from glob import glob
pre_trained_model = tensorflow.keras.applications.ResNet50(input_shape=(224,224,3), include_top=False, weights="imagenet")
from keras.applications.resnet50 import ResNet50
from keras.models import Model
import keras
restnet = ResNet50(include_top=False, weights='imagenet', input_shape=(224,224,3))
output = restnet.layers[-1].output
output = keras.layers.Flatten()(output)
restnet = Model(restnet.input, output=output)
for layer in restnet.layers:
    layer.trainable = False
restnet.summary()
I also tried adding the output layer this way:
last_layer = pre_trained_model.get_layer('conv5_block3_out')
print('last layer output shape:', last_layer.output_shape)
last_output = last_layer.output
x = GlobalAveragePooling2D()(last_output)
x = layers.Dropout(0.5)(x)
x = layers.Dense(3, activation='softmax')(x)
But got this error:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
I am unable to understand either error. I checked the solution given here, but that didn't solve my problem.
You are mixing the tensorflow and keras libraries. It is recommended to use only tensorflow.keras.* instead of keras.*.
Here is the modified code:
from PIL import Image
import numpy as np
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
from tensorflow.keras import backend as K
import gc
from functools import partial
from tqdm import tqdm
from sklearn import metrics
from collections import Counter
import json
import itertools
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D,GlobalAveragePooling2D
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.preprocessing import image
from glob import glob
pre_trained_model = tensorflow.keras.applications.ResNet50(input_shape=(224,224,3), include_top=False, weights="imagenet")
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
restnet = ResNet50(include_top=False, weights='imagenet', input_shape=(224,224,3))
output = restnet.layers[-1].output
output = tensorflow.keras.layers.Flatten()(output)
restnet = tensorflow.keras.models.Model(restnet.input, outputs=output)
for layer in restnet.layers:
    layer.trainable = False
restnet.summary()
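The same rule applies to the second attempt: if the GlobalAveragePooling2D head is built entirely from tensorflow.keras objects, it connects cleanly (a minimal sketch, assuming the pre_trained_model defined above):

from tensorflow.keras import layers, models

# take the last convolutional block of ResNet50 and add a small classification head
last_output = pre_trained_model.get_layer('conv5_block3_out').output
x = layers.GlobalAveragePooling2D()(last_output)
x = layers.Dropout(0.5)(x)
x = layers.Dense(3, activation='softmax')(x)
model = models.Model(pre_trained_model.input, x)
model.summary()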

TypeError: object of type 'Bidirectional' has no len()

This is the code I'm running:
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
import unicodedata
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
# Defining Constants
# Maximum length of text sentences
MAXLEN = 180
# Number of LSTM units
LSTM_N = 150
# batch size
BS=48
input = Input(shape=(MAXLEN,))
model = Embedding(input_dim=n_words, output_dim=MAXLEN, input_length=MAXLEN)(input)
model = Dropout(0.2)(model)
model = Bidirectional(LSTM(units=LSTM_N, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(n_tags, activation="softmax"))(model) # softmax output layer
model = Model(input, out)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(X, np.array(y), batch_size=BS, epochs=2, validation_split=0.05, verbose=1)
I'm running a bidirectional LSTM, but this error keeps appearing:
TypeError: object of type 'Bidirectional' has no len()
What's wrong with this code? Please help.
You are not using consistent imports. Simply change
from keras.models import Model
to
from tensorflow.keras.models import Model
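For instance, with every import taken from tensorflow.keras the model builds cleanly (a minimal sketch, assuming n_words and n_tags are defined as in the original code):

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Dropout, Bidirectional, LSTM, TimeDistributed, Dense

inputs = Input(shape=(MAXLEN,))
x = Embedding(input_dim=n_words, output_dim=MAXLEN, input_length=MAXLEN)(inputs)
x = Dropout(0.2)(x)
x = Bidirectional(LSTM(units=LSTM_N, return_sequences=True, recurrent_dropout=0.1))(x)
out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)
model = Model(inputs, out)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])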
Changing from
from tensorflow.keras.layers import Input
to
from keras.layers import Input
solved my issue.