I have a problem with my CNN model, built with TensorFlow. The goal is to predict the class of satellite images, corresponding to the type of cloud cover (data extracted from the Kaggle competition "Planet: Understanding the Amazon from Space"). There are 4 classes: clear, cloudy, partly cloudy and haze.
Everything works fine until I try to test the model on individual images: then it only ever predicts 2 of the classes. If I retrain and run the model again, it may predict 2 other classes among the 4. The model was trained for 10 epochs, which gave an accuracy of 0.8717.
Here is my code:
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
import h5py
import os
os.listdir("/kaggle/input/")
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense,MaxPooling2D,Conv2D,Flatten,Dropout,Activation
from tensorflow.keras.layers import BatchNormalization
from sklearn import svm
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from oauth2client.client import GoogleCredentials
import csv
#from keras.optimizers import RMSprop
from tensorflow.keras import Input, Model
batch_size = 128
img_width = 256
img_height = 256
train_data = ImageDataGenerator(
    rescale = 1./255,
    validation_split = 0.25)
train_generator = train_data.flow_from_directory(
    '../input/clouds',
    target_size=(img_height, img_width),
    color_mode='rgb',
    batch_size=batch_size,
    shuffle = True,
    class_mode="categorical",
    subset = 'training'
)
valid_generator = train_data.flow_from_directory(
    '../input/clouds',
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    subset = 'validation'
)
num_classes = 4
model = Sequential([
    Input(shape = [img_width, img_height, 3]),
    Conv2D(128, 4, activation = 'relu'),
    MaxPooling2D(),
    Conv2D(64, 4, activation = 'relu'),
    MaxPooling2D(),
    Conv2D(32, 4, activation = 'relu'),
    MaxPooling2D(),
    Conv2D(16, 4, activation = 'relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(64, activation = 'relu'),
    Dense(num_classes, activation = 'softmax')
])
model.compile(optimizer = "adam",
              loss = 'categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_generator, validation_data = valid_generator, epochs = 10)
img_to_predict = cv2.imread('/kaggle/input/clouds-test/clouds_test/test_3877_6013089.jpg') #an augmented image from original dataset
img_to_predict = cv2.cvtColor(img_to_predict, cv2.COLOR_BGR2RGB)
img_to_predict = np.expand_dims(cv2.resize(img_to_predict, (256,256)), axis = 0)
res = model.predict(img_to_predict)
label_map = (train_generator.class_indices)
print(label_map)
print(list(label_map)[np.argmax(res, axis = -1)[0]])
Thank you for your help.
When I use return_sequences=True for an LSTM layer before adding a dense layer, it sometimes results in an error, depending on the environment. I believe it mainly depends on the versions of TensorFlow and Keras. If I am using tensorflow 2.1.0 and Keras 2.3.0, I get the following error -
standardize_input_data 'with shape ' + str(data_shape)) ValueError: Error when checking target: expected dense_2 to have 3 dimensions, but got array with shape (7000, 1)
However, if I use tensorflow 2.9.1 and keras 2.9.0, I do not get any error.
Here is some minimal working sample code -
import os
import pandas as pd
from sklearn import preprocessing
from collections import deque
import random
import numpy as np
import time
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, BatchNormalization, Input
from keras.models import load_model
import keras
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import pickle
epochs = 10
batch_size = 64
X, y = make_classification(n_samples=10000, n_features=3, n_classes=3, n_informative=3, n_redundant=0, n_repeated=0 ,weights=[0.5,0.5,0.5])
X = X.reshape(X.shape[0], 1, 3)
y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
inputs = Input(shape=(X_train.shape[1:]))
outputs = LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True)(inputs)
outputs = Dropout(0.2)(outputs)
outputs = BatchNormalization()(outputs)
outputs = LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True)(outputs)
outputs = Dropout(0.2)(outputs)
outputs = BatchNormalization()(outputs)
outputs = Dense(32, activation="relu", kernel_initializer="glorot_uniform")(outputs)
outputs = Dropout(0.2)(outputs)
outputs = Dense(3, activation="softmax", kernel_initializer="glorot_uniform")(outputs)
model = keras.Model(inputs, outputs)
opt = keras.optimizers.Adam(lr=0.0001, decay=1e-6)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(X_test, y_test))
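To make the shape mismatch concrete, here is a small standalone sketch (the shapes are illustrative, not taken from the data above): with return_sequences=True the final LSTM emits one vector per timestep, so the Dense layers on top produce a 3-D output, which older Keras versions then check against the 2-D targets of shape (7000, 1).
import numpy as np
import tensorflow as tf
x = np.zeros((4, 1, 3), dtype="float32")                   # (batch, timesteps, features)
seq = tf.keras.layers.LSTM(8, return_sequences=True)(x)    # output for every timestep
last = tf.keras.layers.LSTM(8, return_sequences=False)(x)  # output for the last timestep only
print(seq.shape)   # (4, 1, 8) -> a Dense on top stays 3-D
print(last.shape)  # (4, 8)    -> a Dense on top is 2-D, matching (n, 1) targets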
I am working on a fine-grained classification task, classifying car models, so I have used transfer learning with ResNet50. As far as I can tell it performs fine during training, but when I try new images it always predicts a single class. Below is my code.
For training:
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
IMAGE_SIZE = [224, 224]
train_path = 'Datasets/train'
valid_path = 'Datasets/test'
resnet = ResNet50(input_shape = IMAGE_SIZE + [3], weights='imagenet', include_top = False)
for layer in resnet.layers:
    layer.trainable = False
folders = glob('Datasets/train/*') #training folders
x = Flatten()(resnet.output)
prediction = Dense(len(folders), activation='softmax') (x)
model = Model(inputs = resnet.input, outputs = prediction)
model.compile(
    loss = 'categorical_crossentropy',
    optimizer = 'adam',
    metrics = ['accuracy']
)
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('Datasets/train',
                                                 target_size = (224, 224),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('Datasets/test',
                                            target_size = (224, 224),
                                            batch_size = 32,
                                            class_mode = 'categorical')
r = model.fit_generator(
    training_set,
    validation_data=test_set,
    epochs=200,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)
from tensorflow.keras.models import load_model
model.save('model_updateV1.h5')
y_pred = model.predict(test_set)
import numpy as np
y_pred = np.argmax(y_pred, axis=1)
For Trying New Images:
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
from tensorflow.keras.applications.resnet50 import preprocess_input
model = load_model('model_updateV1.h5')
img = image.load_img('Datasets/test/mercedes/45.jpg', target_size=(224,224))
x = image.img_to_array(img)
x = x/255.
x = np.expand_dims(x, axis = 0)
img_data = preprocess_input(x)
img_data.shape
model.predict(img_data)
a = np.argmax(model.predict(img_data), axis=1)
a
I think your problem is that you are rescaling the images twice. You have the line
x=x/255
and then you expand the dimensions, which is fine. However, you then have
img_data = preprocess_input(x)
The preprocess_input function, I believe, rescales the pixel values to between -1 and +1 with
x=x/127.5-1.
So now your pixel values have been scaled down twice. Just delete the line
x=x/255
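As a sketch, here is the prediction block from the question with that change applied (paths and the model file are the asker's; preprocess_input is now the only scaling step). Whichever single step you keep, it should match the preprocessing used during training; the training generators above used rescale = 1./255.
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
import numpy as np
model = load_model('model_updateV1.h5')
img = image.load_img('Datasets/test/mercedes/45.jpg', target_size=(224, 224))
x = image.img_to_array(img)     # raw pixels in [0, 255]; the manual /255 is gone
x = np.expand_dims(x, axis=0)   # add the batch dimension
img_data = preprocess_input(x)  # the single preprocessing step
a = np.argmax(model.predict(img_data), axis=1)
print(a)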
I have a Covid-19 X-ray dataset from Kaggle. I split and resized the images into the following dimensions: X_train (675, 256, 256, 3), X_test (225, 256, 256, 3) and X_val (225, 256, 256, 3). My code to train a DenseNet121 is the following:
import numpy as np
import os
import random
from sklearn.utils import class_weight
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, Input, Activation, BatchNormalization
from keras.applications import DenseNet121
from keras.models import Model
from keras import applications as A
from tensorflow.keras.models import load_model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.optimizers import SGD
seed_value = 1234
os.environ['PYTHONHASHSEED']=str(seed_value)
random.seed(seed_value)
np.random.seed(seed_value)
X_train = A.densenet.preprocess_input(X_train)
X_test = A.densenet.preprocess_input(X_test)
X_val = A.densenet.preprocess_input(X_val)
def get_model(hparams):
    input_tensor = Input(shape=(256, 256, 3))
    pretrain = DenseNet121(weights='imagenet', input_tensor=input_tensor, include_top=False)
    idx = 52
    x = pretrain.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(64, use_bias=False)(x)
    x = Dropout(0.25)(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation("relu")(x)
    predictions = Dense(hparams["nclass"], activation="softmax")(x)
    model = Model(inputs=pretrain.input, outputs=predictions)
    # freeze everything except the BatchNormalization layers...
    for layer in model.layers:
        if "BatchNormalization" in layer.__class__.__name__:
            layer.trainable = True
        else:
            layer.trainable = False
    # ...then unfreeze all layers above index idx
    for i in range(len(model.layers)):
        if i > idx:
            model.layers[i].trainable = True
    model.compile(optimizer=SGD(lr=hparams["lr"]), loss="categorical_crossentropy", metrics=["accuracy"])
    return model
weights = class_weight.compute_class_weight("balanced", classes=np.unique(y_train_labels), y=y_train_labels)
class_weights = dict(zip(np.unique(y_train_labels), weights))
es = EarlyStopping(monitor="val_loss",
                   mode="min",
                   patience=20,
                   verbose=1,
                   restore_best_weights=True)
mc = ModelCheckpoint(filepath="../models/mymodel.h5",
                     monitor="val_loss",
                     mode="min",
                     verbose=1,
                     save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor="val_loss",
                              factor=0.9,
                              patience=5,
                              min_lr=0.000001,
                              verbose=1)
history = model.fit(x=X_train,
                    y=y_train,
                    class_weight=class_weights,
                    validation_data=(X_val, y_val),
                    epochs=500,
                    batch_size=8,
                    callbacks=[es, mc, reduce_lr])
The prediction shows probabilities over the 3 classes (e.g. [0.1, 0.6, 0.3]), but when I later load the model using
classifier = load_model("mymodel.h5", compile=False)
probs = classifier.predict(X_test)
it seems that the prediction result is no longer a probability vector but a class label (and an incorrect one, if we compare with the previous prediction [0.1, 0.6, 0.3]): I get [0, 0, 1] as the output of the loaded model. I'm using Keras 2.3.1 and TensorFlow 2.1.0. May I know what went wrong and how to fix it?
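One way to narrow down where the change happens (a hypothetical check, not from the original post) is to compare the in-memory model's predictions with the reloaded model's on the same data:
import numpy as np
from tensorflow.keras.models import load_model
probs_before = model.predict(X_test)  # model still in memory after fit
classifier = load_model("../models/mymodel.h5", compile=False)  # the checkpoint path used above
probs_after = classifier.predict(X_test)
print(np.allclose(probs_before, probs_after, atol=1e-5))  # True if the save/load round trip is sound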
I run the following code in Google Colab (with GPU):
import random
random.seed(1)
import numpy as np
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
import pandas as pd
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Flatten, Dense, Lambda, SimpleRNN
from keras.optimizers import *
from keras.utils import np_utils
from keras.initializers import *
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, auc, precision_recall_curve
from sklearn.metrics import confusion_matrix
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras import backend as K
import tensorflow as tf
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
##Loading dataset train and validation files, the files are same for every run
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
print("***********************************************************************************************")
def make_model():
    model = Sequential()
    model.add(Conv2D(10, (5,5), kernel_initializer=glorot_uniform(seed=1), input_shape=(22,10,1), use_bias=True, activation="relu", strides=1, padding="valid"))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Flatten())
    model.add(Dense(20, kernel_initializer=glorot_uniform(seed=1), activation="relu"))
    model.add(Lambda(lambda x: tf.expand_dims(x, axis=1)))
    model.add(SimpleRNN(20, kernel_initializer=glorot_uniform(seed=1), activation="relu", return_sequences=False))
    model.add(Dense(1, kernel_initializer=glorot_uniform(seed=1), activation="sigmoid"))
    opti = SGD(lr = 0.01)
    model.compile(loss = "binary_crossentropy", optimizer = opti, metrics = ["accuracy"])
    return model
model = make_model()
model.fit(x_train, y_train, validation_data = (x_validation,y_validation), epochs = 50, batch_size = 20, verbose = 2, callbacks=[es])
Despite setting all the seed values, the model's prediction results differ across runs. The training and testing of the model happen in the same Colab cell.
You are dealing with floating-point numbers that are multiplied and added on different threads, so the operations can happen in a different order from run to run. Floating-point addition and multiplication are not associative, so a different evaluation order can give a slightly different result. See What Every Computer Scientist Should Know About Floating-Point Arithmetic.
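A minimal sketch of that non-associativity, independent of the model above:
import numpy as np
a, b, c = np.float32(1e8), np.float32(-1e8), np.float32(0.1)
print((a + b) + c)  # 0.1
print(a + (b + c))  # 0.0 -- the 0.1 is absorbed by the large magnitude of b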
Running a single-hidden-layer MLP on MNIST, I get extremely different results for Keras and scikit-learn.
import numpy as np
np.random.seed(5)
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn.neural_network import MLPClassifier
(x_train, y_train), (x_test, y_test) = mnist.load_data()
num_classes = 10
batch_data = x_train[:2000]
batch_labels = y_train[:2000]
# flat 2d images
batch_data_flat = batch_data.reshape(2000, 784)
# one-hot encoding
batch_labels_one_hot = np_utils.to_categorical(batch_labels, num_classes)
num_hidden_nodes = 100
alpha = 0.0001
batch_size = 128
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-08
learning_rate_init = 0.001
epochs = 200
# keras
keras_model = Sequential()
keras_model.add(Dense(num_hidden_nodes, activation='relu',
                      kernel_regularizer=regularizers.l2(alpha),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='glorot_uniform'))
keras_model.add(Dense(num_classes, activation='softmax',
                      kernel_regularizer=regularizers.l2(alpha),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='glorot_uniform'))
keras_optim = Adam(lr=learning_rate_init, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
keras_model.compile(optimizer=keras_optim, loss='categorical_crossentropy', metrics=['accuracy'])
keras_model.fit(batch_data_flat, batch_labels_one_hot, batch_size=batch_size, epochs=epochs, verbose=0)
# sklearn
sklearn_model = MLPClassifier(hidden_layer_sizes=(num_hidden_nodes,), activation='relu', solver='adam',
                              alpha=alpha, batch_size=batch_size, learning_rate_init=learning_rate_init,
                              max_iter=epochs, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
sklearn_model.fit(batch_data_flat, batch_labels_one_hot)
# evaluate both on their training data
score_keras = keras_model.evaluate(batch_data_flat, batch_labels_one_hot)
score_sklearn = sklearn_model.score(batch_data_flat, batch_labels_one_hot)
print("Acc: keras %f, sklearn %f" % (score_keras[1], score_sklearn))
Outputs: Acc: keras 0.182500, sklearn 1.000000
The only difference I see is that scikit-learn uses sqrt(2 / (fan_in + fan_out)) for the Glorot initialization of the final layer, vs. sqrt(6 / (fan_in + fan_out)) in Keras. But I don't think that should cause such a difference. Am I forgetting something here?
scikit-learn 0.19.1, Keras 2.2.0 (Backend Tensorflow 1.9.0)
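For concreteness, a quick comparison of the two bounds mentioned above, taking the formulas as stated in the question (not verified against either library), for this post's 100-unit hidden layer feeding the 10-class output:
import math
fan_in, fan_out = 100, 10
print(math.sqrt(2 / (fan_in + fan_out)))  # ~0.135, the bound attributed to scikit-learn
print(math.sqrt(6 / (fan_in + fan_out)))  # ~0.233, the bound attributed to Keras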
You should probably initialize the biases with 'zeros' and not with 'glorot_uniform'.
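A minimal sketch of that change, applied to the two layers from the question (sizes and regularizer as in the original):
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
keras_model = Sequential()
keras_model.add(Dense(100, activation='relu', input_shape=(784,),
                      kernel_regularizer=regularizers.l2(0.0001),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='zeros'))  # zeros instead of glorot_uniform
keras_model.add(Dense(10, activation='softmax',
                      kernel_regularizer=regularizers.l2(0.0001),
                      kernel_initializer='glorot_uniform',
                      bias_initializer='zeros'))  # zeros instead of glorot_uniform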