Running a single hidden layer MLP on MNIST, I get extremely different results for Keras and sklearn.
import numpy as np
np.random.seed(5)
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn.neural_network import MLPClassifier
(x_train, y_train), (x_test, y_test) = mnist.load_data()
num_classes = 10
batch_data = x_train[:2000]
batch_labels = y_train[:2000]
# flatten the 2D images to 784-dim vectors
batch_data_flat = batch_data.reshape(2000, 784)
# one-hot encoding
batch_labels_one_hot = np_utils.to_categorical(batch_labels, num_classes)
num_hidden_nodes = 100
alpha = 0.0001
batch_size = 128
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-08
learning_rate_init = 0.001
epochs = 200
# keras
keras_model = Sequential()
keras_model.add(Dense(num_hidden_nodes, activation='relu',
kernel_regularizer=regularizers.l2(alpha),
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform'))
keras_model.add(Dense(num_classes, activation='softmax',
kernel_regularizer=regularizers.l2(alpha),
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform'))
keras_optim = Adam(lr=learning_rate_init, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
keras_model.compile(optimizer=keras_optim, loss='categorical_crossentropy', metrics=['accuracy'])
keras_model.fit(batch_data_flat, batch_labels_one_hot, batch_size=batch_size, epochs=epochs, verbose=0)
# sklearn
sklearn_model = MLPClassifier(hidden_layer_sizes=(num_hidden_nodes,), activation='relu', solver='adam',
alpha=alpha, batch_size=batch_size, learning_rate_init=learning_rate_init,
max_iter=epochs, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
sklearn_model.fit(batch_data_flat, batch_labels_one_hot)
# evaluate both on their training data
score_keras = keras_model.evaluate(batch_data_flat, batch_labels_one_hot)
score_sklearn = sklearn_model.score(batch_data_flat, batch_labels_one_hot)
print("Acc: keras %f, sklearn %f" % (score_keras[1], score_sklearn))
Outputs: Acc: keras 0.182500, sklearn 1.000000
The only difference I see is that, for the Glorot initialization of the final layer, scikit-learn computes sqrt(2 / (fan_in + fan_out)) vs. sqrt(6 / (fan_in + fan_out)) in Keras. But I don't think that should cause such a difference. Am I forgetting something here?
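For concreteness, those two bounds only differ by a factor of sqrt(3) for this 100-to-10 layer:
import numpy as np
fan_in, fan_out = 100, 10
print(np.sqrt(2.0 / (fan_in + fan_out)))  # scikit-learn bound: ~0.135
print(np.sqrt(6.0 / (fan_in + fan_out)))  # Keras bound:        ~0.233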
scikit-learn 0.19.1, Keras 2.2.0 (Backend Tensorflow 1.9.0)
You should probably initialize the biases with 'zeros' and not with 'glorot_uniform'.
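For example, a minimal sketch of that change, leaving everything else in the question's code as-is:
keras_model.add(Dense(num_hidden_nodes, activation='relu',
                      kernel_regularizer=regularizers.l2(alpha),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='zeros'))  # biases start at zero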
I have a problem with my CNN model made using TensorFlow. The goal is to predict the classes of satellite images, corresponding to the type of clouds (data extracted from the Kaggle competition "Planet: Understanding the Amazon from Space"). There are 4 classes: clear, cloudy, partly cloudy and haze.
Everything works fine until I try to test the model on individual images. Then it always predicts the same 2 classes and nothing else. I noticed that if I run the model again, it may predict 2 other classes among the 4. The model was trained for 10 epochs, which gave an accuracy of 0.8717.
Here is my code:
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
import h5py
import os
os.listdir("/kaggle/input/")
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense,MaxPooling2D,Conv2D,Flatten,Dropout,Activation
from tensorflow.keras.layers import BatchNormalization
from sklearn import svm
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from oauth2client.client import GoogleCredentials
import csv
#from keras.optimizers import RMSprop
from tensorflow.keras import Input, Model
batch_size = 128
img_width = 256
img_height = 256
train_data = ImageDataGenerator(
rescale = 1./255,
validation_split = 0.25)
train_generator = train_data.flow_from_directory(
'../input/clouds',
target_size=(img_height, img_width),
color_mode='rgb',
batch_size=batch_size,
shuffle = True,
class_mode="categorical",
subset = 'training'
)
valid_generator = train_data.flow_from_directory(
'../input/clouds',
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical',
subset = 'validation'
)
num_classes = 4
model = Sequential([
Input(shape = [img_width, img_height, 3]),
Conv2D(128,4,activation = 'relu'),
MaxPooling2D(),
Conv2D(64,4,activation = 'relu'),
MaxPooling2D(),
Conv2D(32,4, activation = 'relu'),
MaxPooling2D(),
Conv2D(16,4,activation = 'relu'),
MaxPooling2D(),
Flatten(),
Dense(64, activation = 'relu'),
Dense(num_classes, activation = 'softmax')
])
model.compile(optimizer = "adam",
loss = 'categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_generator, validation_data = valid_generator, epochs = 10)
img_to_predict = cv2.imread('/kaggle/input/clouds-test/clouds_test/test_3877_6013089.jpg') #an augmented image from original dataset
img_to_predict = cv2.cvtColor(img_to_predict, cv2.COLOR_BGR2RGB)
img_to_predict = np.expand_dims(cv2.resize(img_to_predict, (256,256)), axis = 0)
res = model.predict(img_to_predict)
label_map = (train_generator.class_indices)
print(label_map)
print(list(label_map)[np.argmax(res, axis = -1)[0]])
Thank you for your help.
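One thing worth checking here (a guess, not a confirmed diagnosis): the training generator rescales inputs with rescale = 1./255, but the manual preprocessing before model.predict does not, so at inference the network sees values in [0, 255] instead of [0, 1]. A minimal sketch that mirrors the generator's scaling:
img = cv2.imread('/kaggle/input/clouds-test/clouds_test/test_3877_6013089.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (256, 256)).astype('float32') / 255.0  # match the generator's rescale
res = model.predict(np.expand_dims(img, axis=0))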
I have an imbalanced two-class classification problem with 12750 samples for class 0 and 2550 samples for class 1. I've obtained class weights using class_weight.compute_class_weight and fed them to model.fit. I've tested many loss and optimizer functions. The accuracy on the test data is reasonable, but the loss and accuracy curves aren't normal, as shown below. I would appreciate any suggestion on how to smooth the curves and fix this problem.
Thank you
import tensorflow as tf
import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D,Dropout, Conv1D
from sklearn.utils import class_weight
import scipy.io
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import sklearn.metrics as metrics
from sklearn.utils import class_weight
#General Variables
batch_size = 32
epochs = 100
num_classes = 2
#Load Data
# X_p300 = scipy.io.loadmat('D:/P300_challenge/BCI data- code 2005/code2005/p300Cas.mat',variable_names='p300Cas').get('p300Cas')
# X_np300 = scipy.io.loadmat('D:/P300_challenge/BCI data- code 2005/code2005/np300Cas.mat',variable_names='np300Cas').get('np300Cas')
X_p300 = scipy.io.loadmat('/content/drive/MyDrive/p300/p300Cas.mat',variable_names='p300Cas').get('p300Cas')
X_np300 = scipy.io.loadmat('/content/drive/MyDrive/p300/np300Cas.mat',variable_names='np300Cas').get('np300Cas')
X_np300=X_np300[:,:]
X_p300=X_p300[:,:]
X=np.concatenate((X_p300,X_np300))
X = np.expand_dims(X,2)
Y=np.zeros((15300,))
Y[0:2550]=1
#Shuffle data, as it is currently in class order (all class-1 samples first)
print('Shuffling...')
X, Y = shuffle(X, Y)
#Split data between 80% Training and 20% Testing
print('Splitting...')
x_train, x_test, y_train, y_test = train_test_split(
X, Y, train_size=.8, test_size=.2, shuffle=True)
# determine the weight of each class
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train)
class_weights = {i:class_weights[i] for i in range(2)}
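# For reference, 'balanced' computes n_samples / (n_classes * np.bincount(y)),
# so with roughly 12750 vs 2550 samples this gives about {0: 0.6, 1: 3.0}.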
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv1D(256,kernel_size=3,activation='relu', input_shape =(1680, 1)))
# model.add(Dropout(.5))
model.add(Flatten())
model.add(Dense(200, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='mse',
optimizer='sgd',
metrics= ['acc'])
## use it when you want to apply weight of the classes
history = model.fit(x_train, y_train,class_weight=class_weights, validation_split = 0.3, epochs = epochs, verbose = 1)
#model.fit(x_train, y_train,batch_size=32,validation_split = 0.1, epochs = epochs, verbose = 1)
import matplotlib.pyplot as plt
history_dict = history.history
history_dict.keys()
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc = history_dict.get('acc')
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss_values, 'r--', label = 'Training loss')
plt.plot(epochs, val_loss_values, 'b', label = 'Validation_loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc_values, 'r--', label = 'Training acc')
plt.plot(epochs, val_acc_values, 'b', label = 'Validation acc')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
model.summary()
test_loss, test_acc = model.evaluate(x_test, y_test)
print('test_acc:', test_acc)
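One thing that stands out in the code above (an observation, not a guaranteed fix): the model ends in a softmax over one-hot targets but is compiled with loss='mse'. The standard pairing for that output is categorical cross-entropy, e.g.:
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['acc'])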
I have a Covid-19 X-ray dataset from Kaggle. I split and resized the images into the following dimensions:
X_train (675, 256, 256, 3), X_test (225, 256, 256, 3) and X_val (225, 256, 256, 3). My code to train a DenseNet121 is the following:
import numpy as np
import os
import random
from sklearn.utils import class_weight
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, Input, Activation, BatchNormalization
from keras.applications import DenseNet121
from keras.models import Model
from keras import applications as A
from tensorflow.keras.models import load_model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.optimizers import SGD
seed_value = 1234
os.environ['PYTHONHASHSEED']=str(seed_value)
random.seed(seed_value)
np.random.seed(seed_value)
X_train = A.densenet.preprocess_input(X_train)
X_test = A.densenet.preprocess_input(X_test)
X_val = A.densenet.preprocess_input(X_val)
def get_model(hparams):
input_tensor = Input(shape=(256, 256, 3))
pretrain = DenseNet121(weights='imagenet', input_tensor=input_tensor, include_top=False)
idx = 52
x = pretrain.output
x = GlobalAveragePooling2D()(x)
x = Dense(64, use_bias=False)(x)
x = Dropout(0.25)(x)
x = BatchNormalization(axis=-1)(x)
x = Activation("relu")(x)
predictions = Dense(hparams["nclass"], activation="softmax")(x)
model = Model(inputs=pretrain.input, outputs=predictions)
for layer in model.layers:
if "BatchNormalization" in layer.__class__.__name__:
layer.trainable = True
else:
layer.trainable = False
for i in range(len(model.layers)):
if i > idx:
model.layers[i].trainable = True
model.compile(optimizer=SGD(lr=hparams["lr"]), loss="categorical_crossentropy", metrics=["accuracy"])
return model
weights = class_weight.compute_class_weight("balanced", classes=np.unique(y_train_labels), y=y_train_labels)
class_weights = dict(zip(np.unique(y_train_labels), weights))
es = EarlyStopping(monitor="val_loss",
mode="min",
patience=20,
verbose=1,
restore_best_weights=True)
mc = ModelCheckpoint(filepath="../models/mymodel.h5",
monitor="val_loss",
mode="min",
verbose=1,
save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor="val_loss",
factor=0.9,
patience=5,
min_lr=0.000001,
verbose=1)
history = model.fit(x=X_train,
y=y_train,
class_weight=class_weights,
validation_data=(X_val, y_val),
epochs=500,
batch_size=8,
callbacks=[es, mc, reduce_lr])
Prediction shows the probabilities of the 3 classes (e.g. [0.1, 0.6, 0.3]), but when I load the model later using this command:
classifier = load_model("mymodel.h5", compile=False)
probs = classifier.predict(X_test)
It seems that the prediction results are no longer probabilities but a class label (which is also incorrect if we refer to the previous prediction [0.1, 0.6, 0.3]): I get [0, 0, 1] as the output of the loaded model. I'm using Keras version 2.3.1 and TensorFlow 2.1.0. May I know what went wrong and how to fix it?
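A quick way to narrow this down (a sketch, assuming X_test and the trained model are still in memory): compare the in-memory model's predictions with the reloaded model's predictions on the same inputs. If they differ, the problem is in saving/loading; if they match, the difference must come from what is fed in at prediction time.
probs_before = model.predict(X_test)
classifier = load_model("../models/mymodel.h5", compile=False)  # same path the ModelCheckpoint wrote to
probs_after = classifier.predict(X_test)
print(np.allclose(probs_before, probs_after, atol=1e-5))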
I am working on a stock prediction project and I just want to predict gain/drop labels with an LSTM net. It is a binary classification problem.
However, my LSTM net hardly converges even when I reduce the training set a lot. Technically, it should overfit easily, but my prediction accuracy is still only 60% and the loss is around 0.7 even when I feed just 90 samples for training. So I suspect I made some mistakes in building the neural net, but due to my limited ability I cannot find the reason. I really hope someone can take a look at my code and point out the cause! I would appreciate it a lot!
My code is given below.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import collections
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Dense, LSTM, Dropout, Activation, Flatten ,BatchNormalization
from keras.utils import to_categorical, np_utils
from keras.optimizers import SGD
data = pd.read_csv("EURUSD_M5_201910210000_201910251140.csv", sep="\t")
train_cut = int(data.shape[0] * 0.8)
dataset_train = data[0:train_cut]
training_set = dataset_train["<OPEN>"].values
sc = MinMaxScaler(feature_range=(0, 1))
train_sec_scaled = sc.fit_transform(training_set.reshape(-1, 1))
X_train = []
y_train = []
step_size = 60
predic_days = 1
for i in range(step_size, len(train_sec_scaled) - predic_days):
X_train.append(train_sec_scaled[i - step_size : i, 0])
y_value = train_sec_scaled[i : i + predic_days, 0]
last_day_value = train_sec_scaled[i - 1, 0]
# 1 ==> up, 0 ==> down
if y_value[0] > last_day_value:
y_train.append([1])
else:
y_train.append([0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
y_train = y_train.reshape(y_train.shape[0], y_train.shape[1])
y_train = np_utils.to_categorical(y_train, 2)
print(y_train)
print("train data generated!")
print(X_train.shape, y_train.shape)
def train():
model = Sequential()
model.add(Dropout(0.2))
model.add(LSTM(40))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.02))
model.add(Dropout(0.2))
model.add(Dense(30, kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.02))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-4, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(X_train,
y_train,batch_size=32,epochs=10000)
model.save("trend_analysis.h5")
print("model saved!")
if __name__ == "__main__":
train()
Also, here is the dataset I used:
https://drive.google.com/open?id=1r_0Ko1F6i0F1pToTSsQF1xGt_FTtpUux
Thanks in advance!
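Before blaming the net, a quick sanity check on the generated data might help (a sketch, assuming the arrays built above):
print(X_train.shape, y_train.shape)         # expect (n_samples, 60, 1) and (n_samples, 2)
print(np.bincount(y_train.argmax(axis=1)))  # number of down (0) vs. up (1) labels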
I am using the Inception v3 model to identify the disease present in a chest X-ray image. For training I am using the NIH Chest X-ray Dataset. There are 14 different disease classes present in the dataset, and I have reduced the original image resolution to shrink the dataset size on disk. As I don't have a GPU, I am using Google Colab to train my model, taking only 300 images per class for all minority classes and 400 images for the 'No Finding' (majority) class. Please point out any bugs in my code and suggest other approaches so that I can achieve better accuracy.
import numpy as np
import tensorflow as tf
import random as rn
import os
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
# from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.layers.core import Flatten, Dense, Dropout, Reshape, Lambda
from keras.layers.normalization import BatchNormalization
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
# import os.path
'''F1 score calculation class'''
# import numpy as np
# from keras.callbacks import Callback
# from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
# class Metrics(Callback):
# def on_train_begin(self, logs={}):
# self.val_f1s = []
# self.val_recalls = []
# self.val_precisions = []
# def on_epoch_end(self, epoch, logs={}):
# val_predict = (np.asarray(self.model.predict(self.model.validation_data[0]))).round()
# val_targ = self.model.validation_data[1]
# _val_f1 = f1_score(val_targ, val_predict)
# _val_recall = recall_score(val_targ, val_predict)
# _val_precision = precision_score(val_targ, val_predict)
# self.val_f1s.append(_val_f1)
# self.val_recalls.append(_val_recall)
# self.val_precisions.append(_val_precision)
# print(" — val_f1: %f — val_precision: %f — val_recall %f" % (_val_f1, _val_precision, _val_recall))
# return
# metrics = Metrics()
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# dimensions of our images.
#Inception input size
img_width, img_height = 299, 299
top_layers_checkpoint_path = 'cp.top.best.hdf5'
fine_tuned_checkpoint_path = 'cp.fine_tuned.best.hdf5'
new_extended_inception_weights = 'final_weights.hdf5'
train_data_dir = 'drive/My Drive/Colab Notebooks/Sample-300-XRay-Dataset/train'
validation_data_dir = 'drive/My Drive/Colab Notebooks/Sample-300-XRay-Dataset/test'
nb_train_samples = 3528
nb_validation_samples = 896
top_epochs = 50
fit_epochs = 50
batch_size = 24
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
x = BatchNormalization()(x)
#x =Dropout(0.2)(x)
x = Dense(512, activation='relu')(x)
x = BatchNormalization()(x)
#x= Dropout(0.3)(x)
# and a logistic layer -- we have 15 classes
predictions = Dense(15, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
if os.path.exists(top_layers_checkpoint_path):
model.load_weights(top_layers_checkpoint_path)
print ("Checkpoint '" + top_layers_checkpoint_path + "' loaded.")
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
#Save the best model so far (monitoring val_acc) after each epoch.
mc_top = ModelCheckpoint(top_layers_checkpoint_path, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
#Save the TensorBoard logs.
tb = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
# train the model on the new data for a few epochs
#model.fit_generator(...)
model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples // batch_size,
epochs=top_epochs,
validation_data=validation_generator,
nb_val_samples=nb_validation_samples // batch_size,
callbacks=[mc_top, tb])
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
# print(i, layer.name)
#Save the best model so far (monitoring val_acc) after each epoch.
mc_fit = ModelCheckpoint(fine_tuned_checkpoint_path, monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
if os.path.exists(fine_tuned_checkpoint_path):
model.load_weights(fine_tuned_checkpoint_path)
print ("Checkpoint '" + fine_tuned_checkpoint_path + "' loaded.")
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 172 layers and unfreeze the rest:
for layer in model.layers[:172]:
layer.trainable = False
for layer in model.layers[172:]:
layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers
#model.fit_generator(...)
model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples // batch_size,
epochs=fit_epochs,
validation_data=validation_generator,
nb_val_samples=nb_validation_samples // batch_size,
callbacks=[mc_fit, tb])
model.save_weights(new_extended_inception_weights)
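One concrete bug: in Keras 2, fit_generator takes steps_per_epoch and validation_steps; samples_per_epoch and nb_val_samples are the old Keras 1 names. Since the values above are already divided by batch_size, a sketch of the corrected first call is:
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=top_epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[mc_top, tb])
The same renaming applies to the second fit_generator call used for fine-tuning.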