Lambda function in TensorFlow to modify the data inside the model

My goal is to modify the data inside the model using a Lambda layer.
The code fails at the last step, in model.predict.
Can someone help me fix this, or suggest a similar approach if you have one?
import glob
import tensorflow as tf
import tensorflow_hub as hub
from os.path import basename, join
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import Adam
from keras import layers
from sklearn.metrics import auc, average_precision_score
import numpy as np
import base64
import cv2
# this is the function that I want to call inside the model
# it takes the data, which is a base64-encoded image, and decodes it back into a NumPy array
def base64_decoder(inputs):
    binary_data = base64.b64decode(inputs)
    img = cv2.imdecode(np.frombuffer(binary_data, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
    return img
# this is the model I'm using to test
model_handle = "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/feature_vector/2"
INPUT_SIZE = (331, 331, 3)
model = tf.keras.Sequential([
    tf.keras.layers.Lambda(base64_decoder),
    tf.keras.layers.InputLayer(input_shape=INPUT_SIZE),
    hub.KerasLayer(model_handle, trainable=True),
    tf.keras.layers.Dropout(rate=0.2),
    layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=[tf.keras.metrics.AUC(curve='PR')])
# now I will test the model
im = np.random.rand(331, 331, 3)
img = np.expand_dims(im, axis=0)
# encode the random image as base64
_, buffer = cv2.imencode('.jpg', im)
jpg_as_text = base64.b64encode(buffer)
value = jpg_as_text.decode('utf-8')
# predict
print(model.predict(value))
Also, I may change the Lambda function to a full custom layer; what do you think?
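For example, I was thinking of replacing the NumPy/OpenCV calls with pure TensorFlow ops so the decoding could run inside the graph. This is just an untested sketch on my side (assuming tf.io.decode_base64 and tf.io.decode_jpeg fit my images):
def tf_base64_decoder(inputs):
    # untested sketch: decode a batch of base64 JPEG strings with TF ops only
    def decode_one(b64_string):
        # tf.io.decode_base64 expects the web-safe alphabet ('-' and '_'
        # instead of '+' and '/'), so the strings may need converting first
        binary = tf.io.decode_base64(b64_string)
        img = tf.io.decode_jpeg(binary, channels=3)
        return tf.image.resize(img, (331, 331))
    return tf.map_fn(decode_one, inputs, fn_output_signature=tf.float32)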

Related

Getting ValueError and TypeError while training model using resnet50

I am working on medical image classification using the ResNet50 model. Whenever I try to flatten the layer I get this error:
ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
My code is as below
from PIL import Image
import numpy as np
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
from tensorflow.keras import backend as K
import gc
from functools import partial
from tqdm import tqdm
from sklearn import metrics
from collections import Counter
import json
import itertools
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D,GlobalAveragePooling2D
from keras.layers import Input, Lambda, Dense, Flatten
from keras.preprocessing import image
from glob import glob
pre_trained_model = tensorflow.keras.applications.ResNet50(input_shape=(224,224,3), include_top=False, weights="imagenet")
from keras.applications.resnet50 import ResNet50
from keras.models import Model
import keras
restnet = ResNet50(include_top=False, weights='imagenet', input_shape=(224,224,3))
output = restnet.layers[-1].output
output = keras.layers.Flatten()(output)
restnet = Model(restnet.input, output=output)
for layer in restnet.layers:
    layer.trainable = False
restnet.summary()
I also tried adding the output layer this way:
last_layer = pre_trained_model.get_layer('conv5_block3_out')
print('last layer output shape:', last_layer.output_shape)
last_output = last_layer.output
x = GlobalAveragePooling2D()(last_output)
x = layers.Dropout(0.5)(x)
x = layers.Dense(3, activation='softmax')(x)
But got this error:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
I am unable to understand both errors; I checked the solution given here, but that didn't solve my problem.
You are mixing the tensorflow and keras libraries. It is recommended to use only tensorflow.keras.* instead of keras.*.
Here is the modified code:
from PIL import Image
import numpy as np
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
from tensorflow.keras import backend as K
import gc
from functools import partial
from tqdm import tqdm
from sklearn import metrics
from collections import Counter
import json
import itertools
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D,GlobalAveragePooling2D
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.preprocessing import image
from glob import glob
pre_trained_model = tensorflow.keras.applications.ResNet50(input_shape=(224,224,3), include_top=False, weights="imagenet")
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
restnet = ResNet50(include_top=False, weights='imagenet', input_shape=(224,224,3))
output = restnet.layers[-1].output
output = tensorflow.keras.layers.Flatten()(output)
restnet = tensorflow.keras.models.Model(restnet.input, outputs=output)
for layer in restnet.layers:
    layer.trainable = False
restnet.summary()
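The same applies to your second snippet: with the tensorflow.keras imports above, adding the classification head could look roughly like this (a sketch, assuming pre_trained_model as defined earlier):
last_output = pre_trained_model.get_layer('conv5_block3_out').output
x = GlobalAveragePooling2D()(last_output)
x = layers.Dropout(0.5)(x)
x = layers.Dense(3, activation='softmax')(x)
# wrap into a model with the pre-trained backbone's input
head_model = tensorflow.keras.models.Model(pre_trained_model.input, x)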

Build two sequential LSTM networks

I have the following model, and I want to build the same sequential network and finally concatenate the outputs of the two networks. Here is my model:
import numpy as np
import tensorflow as tf
from keras.models import Sequential, Model,load_model
from keras.layers import Dense, Dropout, Activation, Flatten, LSTM, Embedding, Input, concatenate, Lambda
from keras.utils import np_utils
from sklearn.metrics import mean_squared_error
#from keras.utils.vis_utils import plot_model
import keras
from keras_self_attention import SeqSelfAttention, SeqWeightedAttention
X1 = np.random.normal(size=(100,1,2))
X2 = np.random.normal(size=(100,1,2))
X3 = np.random.normal(size=(100,1,2))
Y = np.random.normal(size=(100,18))
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(X1.shape[1],X1.shape[2])))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=18))
model.compile(optimizer = 'adam', loss = 'mean_squared_error',metrics = ['MAE'])
model.fit(X1, Y, epochs =1, batch_size = 100)
I want to add the red part shown in the figure to the model. Can anybody help me? Thanks
It is better to use the Functional API to handle multiple inputs:
from keras.layers import Input, LSTM, Dense, Concatenate
from keras import models

def sub_model(inp):
    mod1 = LSTM(50, return_sequences=True)(inp)
    mod1 = LSTM(50, dropout=0.2)(mod1)
    return Dense(18)(mod1)

inp1 = Input(shape=(1, 2))
inp2 = Input(shape=(1, 2))
mod1 = sub_model(inp1)
mod2 = sub_model(inp2)
concat = Concatenate()([mod1, mod2])
output = Dense(18)(concat)
model = models.Model([inp1, inp2], output)
which gives you the two-branch model.
To train it, you can use model.fit() with both inputs:
model.fit([X1,X2],y)
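(Remember to compile it before fitting, mirroring the original model, e.g. model.compile(optimizer='adam', loss='mean_squared_error', metrics=['MAE']).)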

TypeError: object of type 'Bidirectional' has no len()

This is the code I'm running:
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
import unicodedata
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
# Defining Constants
# Maximum length of text sentences
MAXLEN = 180
# Number of LSTM units
LSTM_N = 150
# batch size
BS=48
input = Input(shape=(MAXLEN,))
model = Embedding(input_dim=n_words, output_dim=MAXLEN, input_length=MAXLEN)(input)
model = Dropout(0.2)(model)
model = Bidirectional(LSTM(units=LSTM_N, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(n_tags, activation="softmax"))(model) # softmax output layer
model = Model(input, out)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(X, np.array(y), batch_size=BS, epochs=2, validation_split=0.05, verbose=1)
I'm running a bidirectional LSTM. But this error is appearing:
TypeError: object of type 'Bidirectional' has no len()
What's wrong in this code? Please help.
You are not doing consistent imports. Simply change
from keras.models import Model
to
from tensorflow.keras.models import Model
Changing from
from tensorflow.keras.layers import Input
to
from keras.layers import Input
solved my issue.
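Either way, the point is to keep the imports consistent: the Model class and all the layers should come from the same package. For example, using only tensorflow.keras (just an illustrative set of imports for the snippet above):
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional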

what is the corresponding function of K.gradients for tensorflow 2.0?

I want to visualize the classification result with TensorFlow 2.0. For Keras, it needs the following code for CAM:
import tensorflow as tf
import keras.backend as K
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
import cv2
img_path = 'image/test.jpg'
model = VGG16(weights='imagenet')
img = image.load_img('image/test.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
print(np.argmax(preds[0]))
african_elephant_output = model.output[:, 386]
last_conv_layer = model.get_layer('block5_conv3')
grads = K.gradients(african_elephant_output, last_conv_layer.output)[0]
But when I use TensorFlow 2.0, there seems to be no such gradients function. So what is the corresponding function for K.gradients in TensorFlow 2.0?
Here:
import keras.backend as K
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
You are mixing the keras and tf.keras packages, which are NOT compatible with each other. You should import backend from tf.keras:
import tensorflow.keras.backend as K
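That said, in TF 2.x the usual replacement for K.gradients is tf.GradientTape. A rough, untested sketch for the gradients above (assuming model, x and the class index 386 from the question) might look like:
# build a model that maps the input to the last conv layer's output and the predictions
grad_model = tf.keras.models.Model(
    model.inputs, [model.get_layer('block5_conv3').output, model.output])
with tf.GradientTape() as tape:
    conv_output, preds = grad_model(x)
    class_channel = preds[:, 386]  # 386 = African elephant class
# gradients of the class score w.r.t. the conv feature maps
grads = tape.gradient(class_channel, conv_output)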

keras tensorflow load_weights fails

I am using Keras 1.2 with a TensorFlow 1.0.0 backend.
I have a function that loads a pre-calibrated model from JSON and then loads its weights from an HDF5 file.
def load():
    model = model_from_json(open(model_path).read())
    model.load_weights(model_weights_path)
This function, more precisely the call to load_weights, results in the following exception:
RuntimeError: The Session graph is empty. Add operations to the graph before calling run()
I wonder if that is due to these lines that I put in the beginning of my module to set the tensorflow seed for reproducibility:
tf.set_random_seed(123) # To set Tensorflow seed
sess = tf.Session()
keras.backend.set_session(sess)
It seems the keras session does not automatically set the loaded model as the graph associated to the session, hence failing to initialize the weights.
Any explanation and workaround to avoid the exception?
I am pretty much using the same code as you and it works for me.
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, GlobalAveragePooling2D
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.models import model_from_json
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.pooling import AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import ZeroPadding2D
from keras.engine.topology import Merge
from keras.layers import merge
from keras.optimizers import Adam
from keras import backend as K
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import ZeroPadding2D
import PIL
import inception
import tensorflow as tf
import keras
import glob
import pandas as pd
import pickle
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("model.h5")
print("Loaded model from disk")
model.summary()
model.compile(Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
score = model.predict(transfer_values_test)
Indeed it seems that Keras doesn't respect the session set by set_session when loading models.
Try forcing Keras to use a particular session via TensorFlow's context manager:
def load():
    with sess.as_default():
        model = model_from_json(open(model_path).read())
        model.load_weights(model_weights_path)
If Keras still complains, predefine a graph (graph=tf.Graph()) and force model.load_weights to use it by introducing an additional with statement:
def load():
    with graph.as_default():
        with sess.as_default():
            model = model_from_json(open(model_path).read())
            model.load_weights(model_weights_path)
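For this to work, graph and sess have to be the ones Keras was pointed at; a minimal sketch of the module-level setup (assuming the Keras 1.x / TF 1.0 APIs from the question) could be:
graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()
    keras.backend.set_session(sess)  # make Keras use this session/graph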