Transfer learning with InceptionV3 shows poor validation accuracy during training - tensorflow

## MODEL IMPORTING ##
import tensorflow
import pandas as pd
import numpy as np
import os
import keras
import random
import cv2
import math
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense,GlobalAveragePooling2D,Convolution2D,BatchNormalization
from tensorflow.keras.layers import Flatten,MaxPooling2D,Dropout
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.applications.densenet import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator,img_to_array
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
import warnings
warnings.filterwarnings("ignore")
WIDTH = 299
HEIGHT = 299
CLASSES = 4
base_model = InceptionV3(weights='imagenet', include_top=False)
for layer in base_model.layers:
    layer.trainable = False
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dropout(0.4)(x)
predictions = Dense(CLASSES, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.summary()
model.compile(optimizer='adam',  # also tried other optimizers --> similar poor accuracy found
              loss='categorical_crossentropy',
              metrics=['accuracy'])
## IMAGE DATA GENERATOR ##
from keras.applications.inception_v3 import preprocess_input
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
    validation_split=0.2)
train_generator = train_datagen.flow_from_directory(
    TRAIN_DIR,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset="training")
validation_generator = train_datagen.flow_from_directory(
    TRAIN_DIR,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset="validation")
test_datagen = ImageDataGenerator(rescale=1./255)
generator_test = test_datagen.flow_from_directory(directory=TEST_DIR,
                                                  target_size=(HEIGHT, WIDTH),
                                                  batch_size=BATCH_SIZE,
                                                  class_mode='categorical',
                                                  shuffle=False)
## MODEL TRAINING ##
EPOCHS = 20
STEPS_PER_EPOCH = 320  # train_generator.n//train_generator.batch_size
VALIDATION_STEPS = 64  # validation_generator.n//validation_generator.batch_size
history = model.fit_generator(
    train_generator,
    epochs=EPOCHS,
    steps_per_epoch=STEPS_PER_EPOCH,
    validation_data=validation_generator,
    validation_steps=VALIDATION_STEPS)
Result found:
VALIDATION ACCURACY: fluctuating around 0.55-0.67
TRAINING ACCURACY: 0.99
Questions:
What/where is the problem in the transfer learning process?
Are the train, validation and test data generator parameters chosen correctly?

Well, I think you would be better off training the entire model, so remove the code that makes the base model layers non-trainable. Also, if you look at the documentation for InceptionV3, you can set pooling='max', which appends a GlobalMaxPooling2D layer as the base model's output, so you do not need to add your own pooling layer as you did (a sketch of this rebuilt model appears after the callback code below).
Now, I noticed you imported the callbacks ModelCheckpoint and ReduceLROnPlateau but did not use them in model.fit. Using an adjustable learning rate is beneficial for reaching a lower validation loss, and ModelCheckpoint is useful for saving the best model for use in predictions. See the code below for implementations; save_loc is the directory where you want ModelCheckpoint to store the results. NOTE: in ModelCheckpoint I set save_weights_only=True, because this is far faster than saving the entire model on each epoch for which the validation loss decreases.
import tensorflow as tf  # the callbacks below assume this alias

checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=save_loc, monitor='val_loss', verbose=1,
                                                save_best_only=True, save_weights_only=True,
                                                mode='auto', save_freq='epoch', options=None)
lr_adjust = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1,
                                                 verbose=1, mode='auto', min_delta=0.00001,
                                                 cooldown=0, min_lr=0)
callbacks = [checkpoint, lr_adjust]
history = model.fit_generator(train_generator, epochs=EPOCHS,
                              steps_per_epoch=STEPS_PER_EPOCH,
                              validation_data=validation_generator,
                              validation_steps=VALIDATION_STEPS, callbacks=callbacks)
model.load_weights(save_loc) # load the saved weights
# after this use the model to evaluate or predict on the test set.
# if you are satisfied with the results you can then save the entire model with
model.save(save_loc)
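To make the first suggestion concrete, here is a minimal sketch of the rebuilt model (all layers trainable, pooling='max' instead of a hand-added pooling layer), reusing the WIDTH, HEIGHT and CLASSES constants from the question:
base_model = tf.keras.applications.InceptionV3(weights='imagenet', include_top=False,
                                               input_shape=(HEIGHT, WIDTH, 3),
                                               pooling='max')  # adds GlobalMaxPooling2D for us
x = base_model.output  # no freezing loop: every layer stays trainable
predictions = tf.keras.layers.Dense(CLASSES, activation='softmax')(x)
model = tf.keras.models.Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])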
Be careful with the test set generator: ensure you apply the same preprocessing to the test data as you did to the training data. I noticed you only rescaled the test pixels by 1/255, while the training generator uses preprocess_input, which for InceptionV3 scales pixels to the range -1 to +1, so use that for the test generator as well.
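A minimal sketch of a matching test generator, reusing the TEST_DIR, HEIGHT, WIDTH and BATCH_SIZE names from the question:
from tensorflow.keras.applications.inception_v3 import preprocess_input

test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)  # same scaling as training
generator_test = test_datagen.flow_from_directory(directory=TEST_DIR,
                                                  target_size=(HEIGHT, WIDTH),
                                                  batch_size=BATCH_SIZE,
                                                  class_mode='categorical',
                                                  shuffle=False)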
I would also remove the dropout layer initially. Monitor the training loss and validation loss on each epoch and plot the results; if the training loss keeps decreasing while the validation loss trends upward, you are overfitting, and in that case restore the dropout layer.
When you evaluate or predict on the test set, you only want to go through the test set once. So select the test batch size so that the number of test samples divided by the batch size is an integer, and use that integer as the number of test steps. Here is a handy function that will determine that for you, where length is the number of test samples and b_max is the maximum batch size you will allow based on your memory capacity.
def get_bs(length, b_max):
    # pick the largest divisor of length that does not exceed b_max
    batch_size = sorted([int(length / n) for n in range(1, length + 1)
                         if length % n == 0 and length / n <= b_max], reverse=True)[0]
    return batch_size, int(length / batch_size)
# example of use
batch_size, steps = get_bs(10000, 70)
# result is batch_size=50, steps=200
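As a sketch of how those values would feed into evaluation (assuming a TF 2.x model.evaluate, which accepts generators, and the test_datagen/generator_test names from the question; 80 is just an example memory limit):
batch_size, steps = get_bs(generator_test.n, 80)
generator_test = test_datagen.flow_from_directory(directory=TEST_DIR,
                                                  target_size=(HEIGHT, WIDTH),
                                                  batch_size=batch_size,
                                                  class_mode='categorical',
                                                  shuffle=False)
loss, acc = model.evaluate(generator_test, steps=steps)  # exactly one pass over the test set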

Related

Getting ValueError while creating RNN model

I'm getting this error: ValueError: logits and labels must have the same shape, received ((32, 1) vs (32, 23740))
This is my full code:
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
import re
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import pad_sequences
# All data processing stuff
data = pd.read_csv('test.csv') # Load the data
# convert Sentiment type to string
data['Sentiment'] = data['Sentiment'].astype(str)
# remove special characters and convert to lowercase
data["Tweet"] = data["Tweet"].apply(lambda x: x.lower()) # Convert all tweets to lowercase
data["Tweet"] = data["Tweet"].apply(lambda x: x.replace("[^a-zA-Z0-9]", "")) # Remove special characters
data["Sentiment"] = data["Sentiment"].apply(lambda x: x.lower())
# Initialize the Tokenizer
tokenizer = Tokenizer()
# Fit the Tokenizer on the text data
tokenizer.fit_on_texts(data["Tweet"] + data["Sentiment"])
token_sequences1 = tokenizer.texts_to_matrix(data["Tweet"])
token_sequences2 = tokenizer.texts_to_matrix(data["Sentiment"])
padded_sequences1 = pad_sequences(token_sequences1)
padded_sequences2 = pad_sequences(token_sequences2)
# split the data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(
    padded_sequences1, padded_sequences2, test_size=0.2, random_state=42)
# create the RNN model
model = Sequential()
# 1000 is the number of words in the vocabulary, 128 is the dimension of the embedding vector,
model.add(Embedding(1000, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
# compile the model with a specified loss function and optimizer
# binary_crossentropy is used for binary classification problems like this one (positive or negative sentiment)
# accuracy is the metric used to evaluate the model performance (the percentage of correct predictions)
# the loss function and the optimizer can be changed to see if the model performance improves or not
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# train the model on the training data and put verbose=1 to see the training progress
model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1)
# evaluate the model on the testing data
y_pred = model.predict(x_test)
# calculate the evaluation metrics
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
# print the evaluation metrics
print("Accuracy: {:.2f}".format(accuracy))
print("Precision: {:.2f}".format(precision))
print("Recall: {:.2f}".format(recall))
print("F1 Score: {:.2f}".format(f1))
I'm actually trying to create a sentiment analyser for news headline tweets from this data; here is a sample image of how my data looks in the CSV.
Please provide a solution to this; I have tried every solution I could find.

How to find class labels from a keras model

I am predicting classes, but there is something I don't get. In the simplified example below, I train a model to predict MNIST handwritten digits. My test set has an accuracy of 95% when I use
model.evaluate(test_image, test_label)
However, when I use
model.predict(test_image)
and then extract the predicted labels using np.argmax(), this accuracy drops. When I run the whole script again and again, this accuracy changes a lot.
I suspect now that the classes in the model are not ordered 0, 1 ... 9. Is there a way to see the class labels of a model? Or did I make another mistake?
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
# Load data
(train_image, train_label), (test_image, test_label) = load_data()
# Train
model = Sequential([
    Flatten(input_shape=(28,28)),
    Dense(100, activation="relu"),
    Dense(100, activation="relu"),
    Dense(10, activation="sigmoid")
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics='accuracy')
history = model.fit(train_image, train_label,
                    batch_size=32, epochs=50,
                    validation_data=(test_image, test_label),
                    verbose=0)
eval = model.evaluate(test_image, test_label)
print('Accuracy (auto):', eval[1]) # This is always high
# Predict and evaluate manually
predictions = model.predict(test_image)
pred = np.array([np.argmax(pred) for pred in predictions])
true = test_label
print('Accuracy (manually):', np.mean(pred == true)) # This varies a lot

Model predict giving same results after fitting

Hi, I am a beginner in ML and TensorFlow, so do pardon me for not understanding complex theories.
I was building an image classifier CNN as a form of practice. The model is trained using MobileNetV2, and it's supposed to classify images of cats, dogs and pandas. After training my model (with a decent accuracy of 92%), I tried using model.predict() to evaluate how it does with new images, but I noticed that all my outputs were 1. This happens even if I use the same previous training data. By the way, I used 2700 images for training (900 from each class) and 300 for validation.
Here is my code
%tensorflow_version 2.x # this line is not required unless you are in a notebook
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import PIL.Image
import tensorflow_datasets as tfds
import pathlib
from google.colab import drive
drive.mount('/content/gdrive')
IMAGE_SIZE=[150,150]
train_path = "/content/gdrive/MyDrive/Colab Notebooks/Cat_Dog_Panda_CNN/train"
test_path = "/content/gdrive/MyDrive/Colab Notebooks/Cat_Dog_Panda_CNN/test"
IMAGE_SHAPE=[150,150,3]
base_model = tf.keras.applications.MobileNetV2(input_shape=IMAGE_SHAPE,
                                               include_top=False,
                                               weights='imagenet')
base_model.trainable = False
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
prediction_layer = tf.keras.layers.Dense(3)
model = tf.keras.Sequential([
    base_model,
    global_average_layer,
    prediction_layer
])
model.summary()
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
# creates a data generator object that transforms images
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(train_path, target_size=IMAGE_SIZE, batch_size=32, class_mode='categorical')
testing_set = test_datagen.flow_from_directory(test_path, target_size=IMAGE_SIZE, batch_size=32, class_mode='categorical')
model.fit(
    training_set,
    epochs=3,
    validation_data=testing_set)
img = Image.open("/content/gdrive/MyDrive/Colab Notebooks/Cat_Dog_Panda_CNN/test/panda/panda_00094.jpg").convert('RGB').resize((150, 150), Image.ANTIALIAS)
img = np.array(img)
predictions = model.predict(img[None,:,:])
np.argmax(predictions[0])
In model.compile you have loss=tf.keras.losses.BinaryCrossentropy(from_logits=True). Since you have 3 classes you should use
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True)
(keep from_logits=True, because your final Dense(3) layer has no activation and therefore outputs logits).
The MobileNet model was trained with pixel values scaled to the range -1 to +1. The rescale argument only multiplies pixel values, so it cannot produce that shift on its own; instead pass preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input to your generators, which does that scaling for you.
When you feed an image into model.predict you must perform the same preprocessing as you did for the training images: apply the same scaling, and resize the image to the size used in training, namely (150, 150).
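A minimal sketch of those fixes together, assuming the model, generators and constants from the question (img_path is a placeholder for your test image path):
import numpy as np
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from PIL import Image

preprocess = tf.keras.applications.mobilenet_v2.preprocess_input  # scales pixels to [-1, 1]

model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

train_datagen = ImageDataGenerator(preprocessing_function=preprocess,
                                   rotation_range=40, width_shift_range=0.2, height_shift_range=0.2,
                                   shear_range=0.2, zoom_range=0.2, horizontal_flip=True,
                                   fill_mode='nearest')
test_datagen = ImageDataGenerator(preprocessing_function=preprocess)

# apply the exact same preprocessing before a single-image predict
img = Image.open(img_path).convert('RGB').resize((150, 150))  # img_path: hypothetical test image
x = preprocess(np.array(img).astype('float32'))
predictions = model.predict(x[None, ...])
print(np.argmax(predictions[0]))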

Hyperopt deterministic model Keras with seed

I am trying to use hyperopt for a classification deep learning model with keras:
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(1)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(2)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(2)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
#importing libraries
import ...
from hyperas import optim
from hyperas.distributions import choice, uniform, randint
from hyperopt import Trials, STATUS_OK, tpe
def data():
    return x_train_sequence, y_train, x_test_sequence, y_test
# ===============
# Model creation
# ===============
def create_model(x_train_sequence, y_train, x_test_sequence, y_test):
    embedding_dim = {{choice([...])}}
    lstm = {{choice([...])}}
    num_epochs = {{choice([...])}}
    dropout = {{uniform(0, 1)}}
    batch_size = {{choice([32])}}
    model = Sequential()
    model.add(...)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=["binary_accuracy"])
    # Fit the model and evaluate
    result = model.fit(x_train_sequence, y_train,
                       batch_size=batch_size,
                       validation_data=(x_test_sequence, y_test),
                       verbose=verbose, shuffle=True, epochs=num_epochs)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
# ===============
# Apply model and find best run
# ===============
if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          rseed=np.random.seed(1),
                                          max_evals=50,
                                          trials=Trials())
    X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
Even though I thought I had set all the necessary seeds to obtain reproducible runs, I keep getting different results, even if I substitute the {{choice([...])}} entries with integers.
What am I missing? What should I add to seed the model properly?
Thanks so much in advance!

Getting very low accuracy while fine-tuning Inception v3 pre-trained model

I am using the Inception v3 model to identify the disease present in a chest X-ray image. For training I am using the NIH Chest X-ray dataset. I have 14 different disease classes in the dataset, and I have reduced the original image resolution to shrink the dataset size on disk. As I don't have a GPU, I am using Google Colab to train my model, taking only 300 images per class for all minority classes and 400 images for the 'No Finding' (majority) class. Please point out any bugs in my code and suggest other approaches that could achieve better accuracy.
import numpy as np
import tensorflow as tf
import random as rn
import os
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
# from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.layers.core import Flatten, Dense, Dropout, Reshape, Lambda
from keras.layers.normalization import BatchNormalization
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
# import os.path
'''F1 score calculation class'''
# import numpy as np
# from keras.callbacks import Callback
# from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
# class Metrics(Callback):
#     def on_train_begin(self, logs={}):
#         self.val_f1s = []
#         self.val_recalls = []
#         self.val_precisions = []
#     def on_epoch_end(self, epoch, logs={}):
#         val_predict = (np.asarray(self.model.predict(self.model.validation_data[0]))).round()
#         val_targ = self.model.validation_data[1]
#         _val_f1 = f1_score(val_targ, val_predict)
#         _val_recall = recall_score(val_targ, val_predict)
#         _val_precision = precision_score(val_targ, val_predict)
#         self.val_f1s.append(_val_f1)
#         self.val_recalls.append(_val_recall)
#         self.val_precisions.append(_val_precision)
#         print(" — val_f1: %f — val_precision: %f — val_recall %f" % (_val_f1, _val_precision, _val_recall))
#         return
# metrics = Metrics()
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# dimensions of our images.
#Inception input size
img_width, img_height = 299, 299
top_layers_checkpoint_path = 'cp.top.best.hdf5'
fine_tuned_checkpoint_path = 'cp.fine_tuned.best.hdf5'
new_extended_inception_weights = 'final_weights.hdf5'
train_data_dir = 'drive/My Drive/Colab Notebooks/Sample-300-XRay-Dataset/train'
validation_data_dir = 'drive/My Drive/Colab Notebooks/Sample-300-XRay-Dataset/test'
nb_train_samples = 3528
nb_validation_samples = 896
top_epochs = 50
fit_epochs = 50
batch_size = 24
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
x = BatchNormalization()(x)
#x =Dropout(0.2)(x)
x = Dense(512, activation='relu')(x)
x = BatchNormalization()(x)
#x= Dropout(0.3)(x)
# and a logistic layer -- we have 15 classes
predictions = Dense(15, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
if os.path.exists(top_layers_checkpoint_path):
    model.load_weights(top_layers_checkpoint_path)
    print("Checkpoint '" + top_layers_checkpoint_path + "' loaded.")
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
    layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
#Save the model after every epoch.
mc_top = ModelCheckpoint(top_layers_checkpoint_path, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
#Save the TensorBoard logs.
tb = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
# train the model on the new data for a few epochs
#model.fit_generator(...)
model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples // batch_size,
    epochs=top_epochs,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples // batch_size,
    callbacks=[mc_top, tb])
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
# print(i, layer.name)
#Save the model after every epoch.
mc_fit = ModelCheckpoint(fine_tuned_checkpoint_path, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
if os.path.exists(fine_tuned_checkpoint_path):
    model.load_weights(fine_tuned_checkpoint_path)
    print("Checkpoint '" + fine_tuned_checkpoint_path + "' loaded.")
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 172 layers and unfreeze the rest:
for layer in model.layers[:172]:
    layer.trainable = False
for layer in model.layers[172:]:
    layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers
#model.fit_generator(...)
model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples // batch_size,
    epochs=fit_epochs,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples // batch_size,
    callbacks=[mc_fit, tb])
model.save_weights(new_extended_inception_weights)