How to use the pretrained model in an application? - tensorflow

I don't understand how to use the trained model.
For example I trained the model using the code from https://towardsdatascience.com/what-is-a-recurrent-neural-network-and-implementation-of-simplernn-gru-and-lstm-models-in-keras-f7247e97c405.
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
imdb, info = tfds.load("imdb_reviews",with_info=True, as_supervised=True)
train_data, test_data = imdb['train'], imdb['test']
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
vocab_size = 10000
embedding_dim=16
max_length = 120
trunc_type= 'post'
oov_tok="<OOV>"
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen=max_length,
                       truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length)
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.SimpleRNN(32),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
num_epochs=30
history=model.fit(padded, training_labels_final, epochs=num_epochs, validation_data = (testing_padded, testing_labels_final))
It is all from this link.
Then I added the last line of code
model.save('name.model')
Then I open a new script where I want to use this model. My idea is that I pass in a sentence ("He is a bad cat.") and get back 0 or 1, depending on whether it represents good or bad sentiment (I think I should get 0).
import tensorflow as tf
from tensorflow import keras
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
model=tf.keras.models.load_model('name.model')
print(model.output_shape)
prediction=model.predict(["He is a bad cat."])
print(prediction)
And I get an error.
QUESTION 1: In which format must I pass this sentence? If I give this model to my friend, how does he know in which format he has to feed a sentence to the model?
QUESTION 2: I noticed that the output has shape (None, 1). I expected to see a single number (0 or 1), not a strange vector. What is going on, and how do I get 0 or 1?

Before you make predictions you have to convert your text to the model's input format.
For example, to make a prediction for "He is a bad cat." you first have to tokenize the sentence.
#tokenizing the prediction data
sentence=['He is a bad cat.']
sequences=tokenizer.texts_to_sequences(sentence)
Then you have to add padding to it
#padding the tokenized sentence
padded=pad_sequences(sequences,maxlen=max_length,truncating=trunc_type)
Then you can make predictions on the padded sequence
model.predict(padded) #output: array([[0.00079787]], dtype=float32)
The output is a float rather than exactly 0 or 1, because the model returns a probability that reflects how close the given sentence is to the negative or the positive side.
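If you need a hard 0 or 1 (question 2), you can threshold the probability yourself, and for question 1, whoever uses the model also needs the fitted tokenizer, so save it alongside the model. A minimal sketch (the pickle file name is just an example):
import pickle
# hard 0/1 label from the sigmoid probability
prob = model.predict(padded)[0][0]
label = int(prob >= 0.5)   # 0 = negative, 1 = positive
# save the fitted tokenizer next to 'name.model' so any other script
# can preprocess sentences exactly the same way
with open('tokenizer.pickle', 'wb') as f:
    pickle.dump(tokenizer, f)
# in the script that loads the model
with open('tokenizer.pickle', 'rb') as f:
    tokenizer = pickle.load(f)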
For more details please refer to this working gist. Thank You.

Related

Strange results from a neural network built using Keras

I built a sentiment classifier using Keras to predict whether a sentence has a sentiment score of 1, 2, 3, 4 or 5. However, I am getting some strange results. I will first show my code:
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
# the data only reflects the structure of the actual data
# the real data has way larger text and more entries
X_train = ['i am glad i heard about that', 'that is one ugly bike']
y_train = pd.Series(np.array([1, 4])) # pandas series
X_test = ['that hurted me']
y_test = pd.Series(np.array([1, 4])) # pandas series
# tokenizing
tokenizer = Tokenizer(num_words = 5)
tokenizer.fit_on_texts(X_train)
X_train_seq = tokenizer.texts_to_sequences(X_train)
X_test_seq = tokenizer.texts_to_sequences(X_test)
# performing some padding
padding_len = 4
X_train_seq_padded = pad_sequences(X_train_seq, maxlen = padding_len)
X_test_seq_padded = pad_sequences(X_test_seq, maxlen = padding_len)
# building the model
model = Sequential()
model.add(Dense(16, input_dim = padding_len, activation = 'relu', name = 'hidden-1'))
model.add(Dense(16, activation = 'relu', name = 'hidden-2'))
model.add(Dense(16, activation = 'relu', name = 'hidden-3'))
model.add(Dense(6, activation='softmax', name = 'output_layer'))
# compiling the model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics=['accuracy'])
# training the model
callbacks = [EarlyStopping(monitor = 'accuracy', patience = 5, mode = 'max')]
my_model = model.fit(X_train_seq_padded, to_categorical(y_train), epochs = 100, batch_size = 1000, callbacks = callbacks, validation_data = (X_test, to_categorical(y_test)))
Using the actual data I keep getting results around 0.67xx (where xx are random digits), reached after 1-2 epochs, no matter what changes I make to the code (and some of them are extreme).
I tried changing the padding to 1, 10, 100, 1000.
I tried removing the layer hidden-2 and hidden-3.
I tried adding stop word removal before tokenizing.
I tried using the tanh activation function in the hidden layers.
I used the sgd optimizer.
Example output of one setup:
Now my question is: is there something wrong with my code, or are these actually possible results?

Time-Series LSTM Model wrong prediction

I am practicing how to create an LSTM model on a univariate series using this dataset from Kaggle: https://www.kaggle.com/sumanthvrao/daily-climate-time-series-data
My issue is that I am unable to get an accurate prediction of the temperature and my loss seems to be going all over the place. I have tried multiple methods including
Ensuring that time series data is stationary
Changing the time steps
Changing the hyperparameters
Using a stacked LSTM model
I am really curious as to what is wrong with my code, although I do have a few hypotheses:
I made an error when preprocessing the data
I introduced stationarity wrongly
This dataset requires a multivariate approach
%tensorflow_version 2.x # this line is not required unless you are in a notebook
import tensorflow as tf
from numpy import array
from numpy import argmax
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
# preparing independent and dependent features
def prepare_data(timeseries_data, n_features):
    X, y = [], []
    for i in range(len(timeseries_data)):
        # find the end of this pattern
        end_ix = i + n_features
        # check if we are beyond the sequence
        if end_ix > len(timeseries_data)-1:
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = timeseries_data[i:end_ix], timeseries_data[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return np.array(X), np.array(y)
# preparing independent and dependent features
def prepare_x_input(timeseries_data, n_features):
    x = []
    for i in range(len(timeseries_data)):
        # find the end of this pattern
        end_ix = i + n_features
        # check if we are beyond the sequence
        if end_ix > len(timeseries_data):
            break
        # gather the input part of the pattern
        seq_x = timeseries_data[i:end_ix]
        x.append(seq_x)
    x = x[-1:]
    # remove non-stationarity
    # x = np.log(x)
    return np.array(x)
#read data and filter temperature column
df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Weather Parameter/DailyDelhiClimateTrain.csv')
df.head()
temp_df = df.pop('meantemp')
plt.plot(temp_df)
#make data stationary
sta_temp_df = np.log(temp_df).diff()
plt.figure(figsize=(15,5))
plt.plot(sta_temp_df)
print(sta_temp_df)
time_step = 7
x, y = prepare_data(sta_temp_df, time_step)
n_features = 1
x = x.reshape((x.shape[0], x.shape[1], n_features))
model = Sequential()
model.add(LSTM(10, return_sequences=True, input_shape=(time_step, n_features)))
model.add(LSTM(10))
model.add(Dense(16, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.summary()
result = model.fit(x, y, epochs=800)
n_days = 113
pred_temp_df = list(temp_df)
test = sta_temp_df.copy()
sta_temp_df = list(sta_temp_df)
i = 0
while(i < n_days):
    x_input = prepare_x_input(sta_temp_df, time_step)
    print(x_input)
    x_input = x_input.reshape((1, time_step, n_features))
    # pass data into model
    yhat = model.predict(x_input, verbose=0)
    yhat.flatten
    print(yhat[0][0])
    sta_temp_df.append(yhat[0][0])
    i = i + 1
sta_temp_df[0] = np.log(temp_df[0])
cum_temp_df = np.exp(np.cumsum(sta_temp_df))
print(cum_temp_df)
My code is shown above. I would really appreciate it if someone could identify what I did wrong here!

Keras predicting different output for same input image

I am working on a binary classification problem. I have finished training and am now testing the model on single images using the code below:
import warnings
import time
from urllib.request import urlopen
import os
import urllib.request
start_time = time.time()
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    import numpy as np
    from keras.preprocessing.image import img_to_array, load_img
    from keras.models import Sequential
    from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
    from keras.applications.vgg16 import VGG16
    import tensorflow as tf
    import logging
    logging.getLogger('tensorflow').disabled = True

img_size = 224

class PersonPrediction:
    def __init__(self):
        self.class_dictionary = np.load(
            'class_indices_vgg.npy',
            allow_pickle=True).item()
        self.top_model_weights_path = 'v2/weights/bottleneck_fc_model_2020-10-10-05.h5'
        self.num_classes = len(self.class_dictionary)
        self.model = self.create_model(self.num_classes)
        self.graph = tf.compat.v1.get_default_graph()

    def create_model(self, num_of_cls):
        model = Sequential()
        vgg_model = VGG16(include_top=False, weights='imagenet', input_shape=(img_size, img_size, 3))
        for layer in vgg_model.layers[:-4]:
            layer.trainable = False
        model.add(vgg_model)
        model.add(GlobalAveragePooling2D())
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        return model

    def predict(self, path=None, file_name=None):
        if path:
            image_path = path
            path = self.url_to_image(image_path)
        else:
            path = os.path.join('imgs', file_name)
        print("[INFO] loading and preprocessing image...")
        image = load_img(path, target_size=(224, 224))
        image = img_to_array(image)
        # important! otherwise the predictions will be '0'
        image = image / 255
        image = np.expand_dims(image, axis=0)
        label_idx = self.model.predict_classes(image)[0][0]
        probability = self.model.predict(image)[0]
        inv_map = {v: k for k, v in self.class_dictionary.items()}
        label = inv_map[label_idx]
        return label, probability[0]

path = 'temp.jpg'
tax_model = PersonPrediction()
label, proba = tax_model.predict(file_name='frame303.jpg')
print(label, proba)
The problem is that I keep getting changing predictions for both the label and the probability every time I rerun the code, and I am not sure what is causing that.
There are a number of sources of randomness when training a model. First, the weights are randomly initialized, so your model starts from a different point in N-space (N being the number of trainable parameters) on each run. Second, layers like dropout are random in terms of which nodes get selected. Some GPU operations, particularly with multi-processing, can also have some degree of randomness. I have seen a number of posts on getting repeatable results in TensorFlow, but I have not found one that seems to really work. In general, though, the results should be reasonably close if your model is working correctly and you run enough epochs. Once the model is trained and you use it for predictions, as long as you use the same trained model you should get identical prediction results.
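If you want to make training as repeatable as possible, a common approach is to fix all the random seeds before building the model. This is a general sketch, not part of the code above, and full determinism on a GPU is still not guaranteed:
import os
import random
import numpy as np
import tensorflow as tf
SEED = 42
os.environ['PYTHONHASHSEED'] = str(SEED)   # hash-based randomness
random.seed(SEED)                          # Python's built-in RNG
np.random.seed(SEED)                       # NumPy (shuffling, some initializers)
tf.random.set_seed(SEED)                   # TensorFlow/Keras weight init, dropout masks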

I want to convert my binary classification model to a multiclass classification model; I am taking labels from directory names

My code is below. It works fine for classifying two categories of images, taking the labels from the directory names, but whenever I add one more directory it stops working. Can someone help me?
The code classifies images from two directories with two labels, but when I convert it to three labels/directories I get an error, which is posted below. Can someone help me solve the problem? This is for image classification.
I have tried removing the NumPy array conversion; I saw somewhere that I only need to pass the images through a CNN, but I couldn't get that to work.
I am trying to make a classifier for pneumonia caused by coronavirus versus other diseases, using frontal chest X-rays.
from tensorflow.keras.preprocessing.image import ImageDataGeneratorfrom
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
import os
# construct the argument parser and parse the arguments
# initialize the initial learning rate, number of epochs to train for,
# and batch size
INIT_LR = 1e-3
EPOCHS = 40
BS = 66
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class images
print("[INFO] loading images...")
imagePaths = list(paths.list_images('/content/drive/My Drive/testset/'))
data = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the filename
    label = imagePath.split(os.path.sep)[-2]
    # load the image, swap color channels, and resize it to be a fixed
    # 224x224 pixels while ignoring aspect ratio
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
# convert the data and labels to NumPy arrays while scaling the pixel
# intensities to the range [0, 1]
data = np.array(data) / 255.0
labels = np.array(labels)
# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)
# initialize the training data augmentation object
trainAug = ImageDataGenerator(
    rotation_range=15,
    fill_mode="nearest")
# load the VGG16 network, ensuring the head FC layer sets are left
# off
baseModel = VGG16(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))
# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(64, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
layer.trainable = False
# compile our model
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
# train the head of the network
print("[INFO] training head...")
H = model.fit(
    trainAug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs,
    target_names=lb.classes_))
# compute the confusion matrix and use it to derive the raw
# accuracy, sensitivity, and specificity
cm = confusion_matrix(testY.argmax(axis=1), predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
# show the confusion matrix, accuracy, sensitivity, and specificity
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on COVID-19 Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.png")
# serialize the model to disk
print("[INFO] saving COVID-19 detector model...")
model.save('/content/drive/My Drive/setcovid/model.h5', )
This is the error I got in my program
There are a few changes you need to make to get this working. The error you're getting is caused by the one-hot encoding: you're encoding your labels to one-hot twice.
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
Remove the last line, the to_categorical call, from your code. You will then get the one-hot encoding in the correct format, and it will fix the error you're getting now.
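To see why the double encoding breaks with three classes, here is a quick illustration (the class names are made up for the example). LabelBinarizer already returns a one-hot matrix once there are three or more classes, so passing its output to to_categorical encodes it a second time and produces a 3-D array the model cannot train on:
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.utils import to_categorical
labels = np.array(['covid', 'normal', 'pneumonia', 'covid'])   # hypothetical class names
lb = LabelBinarizer()
one_hot = lb.fit_transform(labels)
print(one_hot.shape)                   # (4, 3) -- already one-hot for 3 classes
print(to_categorical(one_hot).shape)   # (4, 3, 2) -- encoded twice, wrong shape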
There is another problem I must mention: your model's output layer has only 2 neurons, but you want to classify 3 classes. Set the output layer to 3 neurons.
headModel = Dense(3, activation="softmax")(headModel)
Since you're now training with 3 classes, it's not binary anymore, so you have to use a different loss. I recommend categorical cross-entropy.
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
You also forgot the following imports. Add these too.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import *
And you're good to go.
By the way, I'm a bit wary of the batch size (66) you're using. I don't know which GPU you have, but I would still suggest decreasing the batch size.

Best method for defining layers using tf keras api?

The tensorflow.keras API is not working when creating the layers reference. Is there any other way of creating a layers reference?
Code:
layer=keras.layers
Error message : NameError: name 'leyer' is not defined
Full code is pasted here...
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
import numpy as np
#makin seed values
seed=7
np.random.seed(seed)
#setting up the dataset for training
dataframe=pd.read_csv("../datasets/iris.csv",header=None)
data=dataframe.values
input_x = data[:,0:4]
true_y = data[:,4]
#Encoding the true_y data to one hot encoding
le=LabelEncoder()
le.fit(true_y)
y_encoded = le.transform(true_y)
y_encoded = keras.utils.to_categorical(y_encoded,num_classes=3)
# creating the model
def base_fun():
    layer=keras.layers
    model = keras.models.Sequential()
    model.add(layer.Dense(4,input_dim=4,kernel_initializer='normal',activation='relu'))
    model.add(leyer.Dense(3, kernel_initializer='normal', activation='relu'))
estimator=keras.wrappers.scikit_learn.KerasClassifier(build_fn=base_fun,epochs=20,batch_size=10)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
result = cross_val_score(estimator, input_x, y_encoded,cv=kfold)
print("Accuracy : %.2%% (%.2%%)" %(result.mean()*100, result.std()*100))
Well, this line:
model.add(leyer.Dnese(3, kernel_initializer='normal', activation='relu'))
has two typos, namely leyer should be layer and Dnese should be Dense like
model.add(layer.Dense(3, kernel_initializer='normal', activation='relu'))
Based on your comment, this line also causes an error:
estimator = keras.wrappers.scikit_learn.KerasClassifier( build_fn = base_fun, epochs = 20, batch_size = 10 )
From the Keras Scikit documentation:
build_fn should construct, compile and return a Keras model, which will then be used to fit/predict.
But your function base_fun() does not return anything. Append this line at the end of base_fun():
return model
As per your comment, the last print line could be changed to this (I don't know the % formatting, I generally use the syntax below):
print( "Accuracy : {:.2%} ({:.2%})".format( result.mean(), result.std() ) )