Getting very low accuracy while fine-tuning an Inception v3 pre-trained model - tensorflow

I am using the Inception v3 model to identify diseases present in chest X-ray images. For training I am using the NIH Chest X-ray dataset, which contains 14 different disease classes; I have also reduced the original image resolution to shrink the dataset's size on disk. As I don't have a GPU, I am using Google Colab to train my model, taking only 300 images per class for the minority classes and 400 images for the 'No Finding' (majority) class. Please point out any bugs in my code, and please suggest other approaches that could help me achieve better accuracy.
import numpy as np
import tensorflow as tf
import random as rn
import os
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
# from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.layers.core import Flatten, Dense, Dropout, Reshape, Lambda
from keras.layers.normalization import BatchNormalization
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
# import os.path
'''F1 score calculation class'''
# import numpy as np
# from keras.callbacks import Callback
# from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
# class Metrics(Callback):
#     def on_train_begin(self, logs={}):
#         self.val_f1s = []
#         self.val_recalls = []
#         self.val_precisions = []
#     def on_epoch_end(self, epoch, logs={}):
#         val_predict = (np.asarray(self.model.predict(self.model.validation_data[0]))).round()
#         val_targ = self.model.validation_data[1]
#         _val_f1 = f1_score(val_targ, val_predict)
#         _val_recall = recall_score(val_targ, val_predict)
#         _val_precision = precision_score(val_targ, val_predict)
#         self.val_f1s.append(_val_f1)
#         self.val_recalls.append(_val_recall)
#         self.val_precisions.append(_val_precision)
#         print(" — val_f1: %f — val_precision: %f — val_recall %f" % (_val_f1, _val_precision, _val_recall))
#         return
# metrics = Metrics()
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# dimensions of our images.
#Inception input size
img_width, img_height = 299, 299
top_layers_checkpoint_path = 'cp.top.best.hdf5'
fine_tuned_checkpoint_path = 'cp.fine_tuned.best.hdf5'
new_extended_inception_weights = 'final_weights.hdf5'
train_data_dir = 'drive/My Drive/Colab Notebooks/Sample-300-XRay-Dataset/train'
validation_data_dir = 'drive/My Drive/Colab Notebooks/Sample-300-XRay-Dataset/test'
nb_train_samples = 3528
nb_validation_samples = 896
top_epochs = 50
fit_epochs = 50
batch_size = 24
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
x = BatchNormalization()(x)
#x =Dropout(0.2)(x)
x = Dense(512, activation='relu')(x)
x = BatchNormalization()(x)
#x= Dropout(0.3)(x)
# and a logistic layer -- we have 15 classes
predictions = Dense(15, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
if os.path.exists(top_layers_checkpoint_path):
    model.load_weights(top_layers_checkpoint_path)
    print("Checkpoint '" + top_layers_checkpoint_path + "' loaded.")
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
    layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
#Save the model after every epoch.
mc_top = ModelCheckpoint(top_layers_checkpoint_path, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
#Save the TensorBoard logs.
tb = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
# train the model on the new data for a few epochs
#model.fit_generator(...)
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=top_epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[mc_top, tb])
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
#     print(i, layer.name)
#Save the model after every epoch.
mc_fit = ModelCheckpoint(fine_tuned_checkpoint_path, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
if os.path.exists(fine_tuned_checkpoint_path):
    model.load_weights(fine_tuned_checkpoint_path)
    print("Checkpoint '" + fine_tuned_checkpoint_path + "' loaded.")
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 172 layers and unfreeze the rest:
for layer in model.layers[:172]:
    layer.trainable = False
for layer in model.layers[172:]:
    layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers
#model.fit_generator(...)
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=fit_epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[mc_fit, tb])
model.save_weights(new_extended_inception_weights)


Transfer learning with InceptionV3 shows poor validation accuracy during training

## MODEL IMPORTING ##
import tensorflow
import pandas as pd
import numpy as np
import os
import keras
import random
import cv2
import math
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense,GlobalAveragePooling2D,Convolution2D,BatchNormalization
from tensorflow.keras.layers import Flatten,MaxPooling2D,Dropout
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator,img_to_array
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
import warnings
warnings.filterwarnings("ignore")
WIDTH = 299
HEIGHT = 299
CLASSES = 4
base_model = InceptionV3(weights='imagenet', include_top=False)
for layer in base_model.layers:
    layer.trainable = False
x = base_model.output
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dropout(0.4)(x)
predictions = Dense(CLASSES, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.summary()
model.compile(optimizer='adam',  # also tried other optimisers --> similar poor accuracy found
              loss='categorical_crossentropy',
              metrics=['accuracy'])
## IMAGE DATA GENERATOR ##
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
    validation_split=0.2)
train_generator = train_datagen.flow_from_directory(
    TRAIN_DIR,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset="training")
validation_generator = train_datagen.flow_from_directory(
    TRAIN_DIR,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    subset="validation")
test_datagen = ImageDataGenerator(rescale=1./255)
generator_test = test_datagen.flow_from_directory(
    directory=TEST_DIR,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    shuffle=False)
## MODEL training ##
EPOCHS = 20
STEPS_PER_EPOCH = 320 #train_generator.n//train_generator.batch_size
VALIDATION_STEPS = 64 #validation_generator.n//validation_generator.batch_size
history = model.fit_generator(
    train_generator,
    epochs=EPOCHS,
    steps_per_epoch=STEPS_PER_EPOCH,
    validation_data=validation_generator,
    validation_steps=VALIDATION_STEPS)
Result found:
Validation accuracy fluctuates around 0.55-0.67, while training accuracy reaches 0.99.
Questions:
1. What/where is the problem in the transfer-learning process?
2. Are the train, validation, and test data-generator parameters chosen correctly?
Well, I think you would be better off training the entire model, so remove the code that makes the base model's layers non-trainable. If you look at the documentation for InceptionV3 located here, you can set pooling='max', which puts a GlobalMaxPooling2D layer at the output; if you do that, you do not need to add your own pooling layer as you did. I also noticed that you imported the callbacks ModelCheckpoint and ReduceLROnPlateau but did not use them in model.fit. An adjustable learning rate is beneficial for reaching a lower validation loss, and ModelCheckpoint is useful for saving the best model for use in predictions. See the code below for implementations; save_loc is the directory where you want ModelCheckpoint to store the results. NOTE: in ModelCheckpoint I set save_weights_only=True, because that is far faster than saving the entire model on each epoch for which the validation loss decreases.
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=save_loc, monitor='val_loss', verbose=1,
                                                save_best_only=True, save_weights_only=True,
                                                mode='auto', save_freq='epoch', options=None)
lr_adjust = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=1,
                                                 verbose=1, mode="auto", min_delta=0.00001,
                                                 cooldown=0, min_lr=0)
callbacks = [checkpoint, lr_adjust]
history = model.fit_generator(train_generator, epochs=EPOCHS,
                              steps_per_epoch=STEPS_PER_EPOCH,
                              validation_data=validation_generator,
                              validation_steps=VALIDATION_STEPS, callbacks=callbacks)
model.load_weights(save_loc) # load the saved weights
# after this use the model to evaluate or predict on the test set.
# if you are satisfied with the results you can then save the entire model with
model.save(save_loc)
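For the pooling='max' suggestion, here is a minimal sketch (reusing the CLASSES constant and the generators from the question); with pooling='max' the base model already ends in a GlobalMaxPooling2D layer, so only the final Dense classifier is added and every layer is left trainable:
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model

# pooling='max' appends GlobalMaxPooling2D to the convolutional base
base_model = InceptionV3(weights='imagenet', include_top=False, pooling='max')
predictions = Dense(CLASSES, activation='softmax')(base_model.output)
model = Model(inputs=base_model.input, outputs=predictions)  # nothing frozen
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])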
Be careful with the test-set generator: ensure you apply the same preprocessing to the test data as you did to the training data. I noticed you only rescaled the pixels. I am not sure exactly what the preprocess function does, but I would use it for the test data as well.
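As a sketch of that point (reusing TEST_DIR, HEIGHT, WIDTH, and BATCH_SIZE from the question), the test generator would apply the same preprocess_input rather than a bare rescale:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.inception_v3 import preprocess_input

# identical preprocessing at test time as at training time
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
generator_test = test_datagen.flow_from_directory(
    directory=TEST_DIR,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    shuffle=False)  # keep order fixed so predictions line up with labels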
I would also remove the dropout layer initially. Monitor the training loss and validation loss on each epoch and plot the results. If the training loss continues to decrease while the validation loss trends upward, you are overfitting; if so, restore the dropout layer. When you evaluate or predict on the test set, you only want to go through the test set once, so select the test batch size such that (number of test samples) / (test batch size) is an integer, and use that integer as the number of test steps. Here is a handy function that will determine that for you, where length is the number of test samples and b_max is the maximum batch size you will allow based on your memory capacity.
def get_bs(length, b_max):
    batch_size = sorted([int(length / n) for n in range(1, length + 1)
                         if length % n == 0 and length / n <= b_max], reverse=True)[0]
    return batch_size, int(length / batch_size)

# example of use
batch_size, steps = get_bs(10000, 70)
# result is batch_size=50, steps=200

Keras: using a trained InceptionV3 model + CIFAR10 gives an error about batch size

I am new to machine learning, Keras, etc.
I am trying to use a trained model to increase accuracy; in my case I followed Jerry Kurata on Pluralsight in using InceptionV3 and only modifying the last layer to train it to recognize birds.
The dataset I have is Keras's built-in CIFAR10 (here is the official tutorial).
Here is the error message:
F tensorflow/stream_executor/cuda/cuda_dnn.cc:516] Check failed:
cudnnSetTensorNdDescriptor(handle_.get(), elem_type, nd, dims.data(),
strides.data()) == CUDNN_STATUS_SUCCESS (3 vs. 0)batch_descriptor:
{count: 32 feature_map_count: 288 spatial: %d 0%d 0 value_min:
0.000000 value_max: 0.000000 layout: BatchDepthYX} Aborted (core dumped)
I saw one possible cause here:
the image samples in CIFAR10 (32*32) are too small, which causes this issue.
But I cannot figure out how to fix it.
Here is my code:
import matplotlib.pyplot as plt
import keras
from keras import backend as K
with K.tf.device("/device:GPU:0"):
    config = K.tf.ConfigProto(intra_op_parallelism_threads=4,
                              inter_op_parallelism_threads=4, allow_soft_placement=True,
                              device_count={'CPU': 1, 'GPU': 1})
    session = K.tf.Session(config=config)
    K.set_session(session)
from keras.callbacks import EarlyStopping
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.datasets import cifar10
# "/device:GPU:0"
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def create_generator():
    return ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode='nearest',
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)
Training_Epochs = 1
Batch_Size = 32
Number_FC_Neurons = 1024
Num_Classes = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, Num_Classes)
y_test = keras.utils.to_categorical(y_test, Num_Classes)
# load cifar10 data here https://keras.io/datasets/
datagen = create_generator()
datagen.fit(x_train)
Inceptionv3_model = InceptionV3(weights='imagenet', include_top=False)
print('Inception v3 model without last FC loaded')
x = Inceptionv3_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(Number_FC_Neurons, activation='relu')(x)
predictions = Dense(Num_Classes, activation='softmax')(x)
model = Model(inputs=Inceptionv3_model.input, outputs=predictions)
# print(model.summary())
print('\nFine tuning existing model')
Layers_To_Freeze = 172
for layer in model.layers[:Layers_To_Freeze]:
    layer.trainable = False
for layer in model.layers[Layers_To_Freeze:]:
    layer.trainable = True
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
cbk_early_stopping = EarlyStopping(monitor='val_acc', mode='max')
print(len(x_train))
history_transfer_learning = model.fit_generator(
    datagen.flow(x_train, y_train, batch_size=Batch_Size),
    epochs=Training_Epochs,
    validation_data=(x_test, y_test),
    workers=4,
    steps_per_epoch=len(x_train) // Batch_Size,
    callbacks=[cbk_early_stopping]
)
model.save('incepv3_transfer_cifar10.h5', overwrite=True, include_optimizer=True)
# Score trained model.
scores = model.evaluate(x_test, y_test, batch_size=12, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
Your error, as you said, comes from the input-size difference: the pre-trained ImageNet model expects larger images than CIFAR-10's (32, 32).
You need to specify the input_shape of the model beforehand, like this:
Inceptionv3_model = InceptionV3(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
For more explanation you can check this tutorial.
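Note that recent versions of keras-applications enforce a 75x75 minimum input for InceptionV3 when ImageNet weights are requested, so if input_shape=(32, 32, 3) is rejected on your version, a hedged alternative is to upsample inside the model before the pre-trained base (the 3x factor and the Lambda approach here are illustrative assumptions, not part of the original answer):
from keras import backend as K
from keras.layers import Input, Lambda
from keras.applications.inception_v3 import InceptionV3

inputs = Input(shape=(32, 32, 3))
# upsample 32x32 CIFAR images by 3x to 96x96, above the 75x75 minimum
upsampled = Lambda(lambda img: K.resize_images(img, 3, 3, data_format='channels_last'))(inputs)
base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=upsampled)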

'Sequential' object has no attribute '_ckpt_saved_epoch' error when trying to save my model using a callback in Keras

I got this error when I tried to save my MobileNet model.
Traceback (most recent call last):
  File "../src/script.py", line 150, in <module>
    callbacks=[cb_checkpointer, cb_early_stopper]
  File "/opt/conda/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/lib/python3.6/site-packages/keras/engine/training.py", line 1418, in fit_generator
    initial_epoch=initial_epoch)
  File "/opt/conda/lib/python3.6/site-packages/keras/engine/training_generator.py", line 264, in fit_generator
    callbacks.on_train_end()
  File "/opt/conda/lib/python3.6/site-packages/keras/callbacks.py", line 142, in on_train_end
    callback.on_train_end(logs)
  File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/keras/callbacks.py", line 940, in on_train_end
    if self.model._ckpt_saved_epoch is not None:
AttributeError: 'Sequential' object has no attribute '_ckpt_saved_epoch'
I am using callbacks for saving:
filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
cb_early_stopper = EarlyStopping(monitor = 'val_loss', mode='min', verbose=1, patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = filepath, monitor = 'val_loss', save_best_only = True, mode = 'auto')
My model code:
import numpy as np
import cv2
import os
#from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras import optimizers
from keras import regularizers
from keras.regularizers import l2
from keras.layers import Dropout
from keras.applications.mobilenet import MobileNet
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
#print(os.listdir("testset/"))
# Fixed for our classes
NUM_CLASSES = 3
# Fixed for color images
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
# Common accuracy metric for all outputs, but can use different metrics for different output
LOSS_METRICS = ['accuracy']
# EARLY_STOP_PATIENCE must be < NUM_EPOCHS
NUM_EPOCHS = 100
EARLY_STOP_PATIENCE = 50
# These steps value should be proper FACTOR of no.-of-images in train & valid folders respectively
# Training images processed in each step would be no.-of-train-images / STEPS_PER_EPOCH_TRAINING
STEPS_PER_EPOCH_TRAINING = 27
STEPS_PER_EPOCH_VALIDATION = 11
# These steps value should be proper FACTOR of no.-of-images in train & valid folders respectively
# NOTE that these BATCH* are for Keras ImageDataGenerator batching to fill epoch step input
BATCH_SIZE_TRAINING = 8
BATCH_SIZE_VALIDATION = 8
base_mobilenet_model = MobileNet(include_top = False, weights = None)
model = Sequential()
model.add(BatchNormalization(input_shape = [224,224,3]))
model.add(base_mobilenet_model)
model.add(BatchNormalization())
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))
# 2nd layer as Dense for 3-class classification,
model.add(Dense(NUM_CLASSES, activation = DENSE_LAYER_ACTIVATION,activity_regularizer=regularizers.l2(0.01)))
model.summary()
model.compile(optimizer = 'adam', loss = OBJECTIVE_FUNCTION, metrics = LOSS_METRICS)
image_size = IMAGE_RESIZE
shift = 0.2
# preprocessing_function is applied on each image but only after re-sizing & augmentation (resize => augment => pre-process)
# Each of the keras.application.resnet* preprocess_input MOSTLY mean BATCH NORMALIZATION (applied on each batch) stabilize the inputs to nonlinear activation functions
# Batch Normalization helps in faster convergence
# featurewise_center=True, featurewise_std_normalization=True,
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                    width_shift_range=shift,
                                    height_shift_range=shift,
                                    horizontal_flip=True,
                                    vertical_flip=True,
                                    rotation_range=45,
                                    brightness_range=[0.2, 1.0],
                                    zoom_range=[0.5, 1.0]
                                    )
# flow_From_directory generates batches of augmented data (where augmentation can be color conversion, etc)
# Both train & valid folders must have NUM_CLASSES sub-folders
train_generator = data_generator.flow_from_directory(
    '/kaggle/input/grade-dataset/trainset/',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
    '/kaggle/input/grade-dataset/testset/',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_VALIDATION,
    class_mode='categorical')
# Max number of steps that these generator will have opportunity to process their source content
# len(train_generator) should be 'no. of available train images / BATCH_SIZE_TRAINING'
# len(valid_generator) should be 'no. of available train images / BATCH_SIZE_VALIDATION'
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
# Early stopping & checkpointing the best model in ../working dir & restoring that as our model for prediction
filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
cb_early_stopper = EarlyStopping(monitor = 'val_loss', mode='min', verbose=1, patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = filepath, monitor = 'val_loss', save_best_only = True, mode = 'auto')
fit_history = model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
    epochs=NUM_EPOCHS,
    validation_data=validation_generator,
    validation_steps=STEPS_PER_EPOCH_VALIDATION,
    callbacks=[cb_checkpointer, cb_early_stopper]
)
I solved the problem by replacing the code:
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
with the code:
from keras.callbacks import EarlyStopping, ModelCheckpoint
The model is built with the standalone keras package, but the original callbacks came from tensorflow.python.keras, whose ModelCheckpoint expects tf.keras-specific model attributes such as _ckpt_saved_epoch; importing the callbacks from the same package as the model resolves the error.
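More generally, since the question's script mixes imports from tensorflow.python.keras and from keras in several places, here is a minimal sketch of keeping every import in the standalone keras namespace (assuming that is the package the model is built with):
from keras.models import Sequential
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, BatchNormalization
from keras.applications.mobilenet import MobileNet
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import regularizers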

Comparing PyTorch and Keras examples on CIFAR10 data

I am using the CIFAR10 dataset to learn how to code with Keras and PyTorch.
The environment is Python 3.6.7, Torch 1.0.0, Keras 2.2.4, Tensorflow 1.14.0.
I use the same batch size, number of epochs, learning rate, and optimizer, with DenseNet121 as the model.
After training, Keras gets 69% accuracy on the test data, while PyTorch gets only 54%.
I know the results will differ, but why is the result so much worse in PyTorch?
Here is the Keras code:
import os, keras
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121
batch_size = 32
num_classes = 10
epochs = 20
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# model
model = DenseNet121(include_top=True, weights=None, input_shape=(32,32,3), classes=10)
# initiate SGD optimizer
opt = keras.optimizers.SGD(lr=0.001, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          shuffle=True)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
Here is the Pytorch code:
import torch
import torchvision
import torchvision.transforms as transforms
from torch import flatten
import torch.optim as optim
from torchvision import transforms, models
from torch.nn import Linear, Softmax, Module, Sequential, CrossEntropyLoss
import numpy as np
from tqdm import tqdm
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
transform = transforms.Compose([transforms.ToTensor()])
trainset = torchvision.datasets.CIFAR10(root='./DataSet', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./DataSet', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=0)
import torch.nn as nn
import torch.nn.functional as F
class Net(Module):
    def __init__(self):
        super(Net, self).__init__()
        self.funFeatExtra = Sequential(*[i for i in list(models.densenet121().children())[:-1]])
        self.funFlatten = flatten
        self.funOutputLayer = Linear(1024, 10)
        self.funSoftmax = Softmax(dim=1)

    def forward(self, x):
        x = self.funFeatExtra(x)
        x = self.funFlatten(x, 1)
        x = self.funOutputLayer(x)
        x = self.funSoftmax(x)
        return x
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(20):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in tqdm(enumerate(trainloader, 0)):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net.cuda()(inputs.cuda())
        loss = criterion(outputs, labels.cuda())
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        # if i % 2000 == 1999:  # print every 2000 mini-batches
        #     print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
        #     running_loss = 0.0
print('Finished Training')
########################################################################
# The results seem pretty good.
#
# Let us look at how the network performs on the whole dataset.
correct = 0
total = 0
with torch.no_grad():
    for data in tqdm(testloader):
        images, labels = data
        outputs = net.cpu()(images.cpu())
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
You are not supposed to softmax the model output before you pass it to CrossEntropyLoss. Per the documentation:
This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
...
The input is expected to contain raw, unnormalized scores for each class.
You can softmax them separately (outside of forward()) when calculating accuracy.
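A minimal sketch of that fix, mirroring the Net class from the question: forward() returns raw logits for CrossEntropyLoss, and softmax is applied (if at all) only outside the loss computation:
import torch
import torch.nn as nn
from torchvision import models

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # same backbone as the question: DenseNet121 minus its classifier
        self.funFeatExtra = nn.Sequential(*list(models.densenet121().children())[:-1])
        self.funOutputLayer = nn.Linear(1024, 10)

    def forward(self, x):
        x = self.funFeatExtra(x)
        x = torch.flatten(x, 1)
        return self.funOutputLayer(x)  # raw, unnormalized logits -- no softmax here

# CrossEntropyLoss applies log-softmax internally:
criterion = nn.CrossEntropyLoss()
# at evaluation time, argmax over logits equals argmax over softmax(logits),
# so probabilities are only needed if calibrated scores are wanted:
# probs = torch.nn.functional.softmax(net(images), dim=1)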

MLP: totally different results for Keras and scikit-learn

Running a single-hidden-layer MLP on MNIST, I get extremely different results for Keras and sklearn.
import numpy as np
np.random.seed(5)
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn.neural_network import MLPClassifier
(x_train, y_train), (x_test, y_test) = mnist.load_data()
num_classes = 10
batch_data = x_train[:2000]
batch_labels = y_train[:2000]
# flat 2d images
batch_data_flat = batch_data.reshape(2000, 784)
# one-hot encoding
batch_labels_one_hot = np_utils.to_categorical(batch_labels, num_classes)
num_hidden_nodes = 100
alpha = 0.0001
batch_size = 128
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-08
learning_rate_init = 0.001
epochs = 200
# keras
keras_model = Sequential()
keras_model.add(Dense(num_hidden_nodes, activation='relu',
                      kernel_regularizer=regularizers.l2(alpha),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='glorot_uniform'))
keras_model.add(Dense(num_classes, activation='softmax',
                      kernel_regularizer=regularizers.l2(alpha),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='glorot_uniform'))
keras_optim = Adam(lr=learning_rate_init, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
keras_model.compile(optimizer=keras_optim, loss='categorical_crossentropy', metrics=['accuracy'])
keras_model.fit(batch_data_flat, batch_labels_one_hot, batch_size=batch_size, epochs=epochs, verbose=0)
# sklearn
sklearn_model = MLPClassifier(hidden_layer_sizes=(num_hidden_nodes,), activation='relu', solver='adam',
                              alpha=alpha, batch_size=batch_size, learning_rate_init=learning_rate_init,
                              max_iter=epochs, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
sklearn_model.fit(batch_data_flat, batch_labels_one_hot)
# evaluate both on their training data
score_keras = keras_model.evaluate(batch_data_flat, batch_labels_one_hot)
score_sklearn = sklearn_model.score(batch_data_flat, batch_labels_one_hot)
print("Acc: keras %f, sklearn %f" % (score_keras[1], score_sklearn))
Outputs: Acc: keras 0.182500, sklearn 1.000000
The only difference I see is that, for the Glorot initialization of the final layer, scikit-learn uses sqrt(2 / (fan_in + fan_out)) whereas Keras uses sqrt(6 / (fan_in + fan_out)). But I don't think that should cause such a difference. Am I forgetting something here?
scikit-learn 0.19.1, Keras 2.2.0 (Backend Tensorflow 1.9.0)
You should probably initialize the biases with 'zeros' and not with 'glorot_uniform'.
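A minimal sketch of that change, reusing the layer definitions from the question (only the bias_initializer arguments differ):
keras_model = Sequential()
keras_model.add(Dense(num_hidden_nodes, activation='relu',
                      kernel_regularizer=regularizers.l2(alpha),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='zeros'))  # zero biases instead of glorot_uniform
keras_model.add(Dense(num_classes, activation='softmax',
                      kernel_regularizer=regularizers.l2(alpha),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='zeros'))  # zero biases instead of glorot_uniform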