I have tried several times to train the following model on Google Colab, but the session keeps disconnecting (around the 5th or 6th epoch) and the training never finishes. I also tried the JavaScript snippet that keeps the session alive by clicking the Connect button, but it did not work. How can I resolve this problem?
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import Adam

classifier = Sequential()
classifier.add(Conv2D(6, (3, 3), input_shape = (30, 30, 3), data_format="channels_last", activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(6, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 64, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
opt = Adam(learning_rate = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08, decay = 0.0)
classifier.compile(optimizer = opt, loss = 'binary_crossentropy', metrics = ['accuracy', precision, recall, fmeasure])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   horizontal_flip = True,
                                   vertical_flip = True,
                                   rotation_range = 180)
validation_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('/home/dataset/training_set',
                                                 target_size = (30, 30),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
validation_set = validation_datagen.flow_from_directory('/home/dataset/validation_set',
                                                        target_size = (30, 30),
                                                        batch_size = 32,
                                                        class_mode = 'binary')
history = classifier.fit_generator(training_set,
                                   steps_per_epoch = 208170,
                                   epochs = 25,
                                   validation_data = validation_set,
                                   validation_steps = 89140)
Colab will automatically close your notebook after 8-9 hours, so you should checkpoint the model to Google Drive after each epoch.
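For example, something along these lines should work (a minimal sketch, not from the original answer; it assumes you mount Google Drive in the Colab runtime, and the checkpoint path is only illustrative):

from google.colab import drive
from keras.callbacks import ModelCheckpoint

# Mount Google Drive so checkpoints survive a disconnected runtime
drive.mount('/content/drive')

# Save the model after every epoch; the directory and filename pattern are examples
checkpoint = ModelCheckpoint('/content/drive/MyDrive/checkpoints/model_epoch_{epoch:02d}.h5',
                             verbose = 1)

history = classifier.fit_generator(training_set,
                                   steps_per_epoch = 208170,
                                   epochs = 25,
                                   validation_data = validation_set,
                                   validation_steps = 89140,
                                   callbacks = [checkpoint])

After a disconnect you can reload the latest checkpoint with keras.models.load_model and resume training, e.g. by passing initial_epoch to fit_generator.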
Everything was going well with these CNNs for classifying MarkovTransitionField sequences while I was using tensorflow, but after I switched to tensorflow-gpu all predictions return the same value, so the model isn't learning (although it is fast).
#model 1
def model_1_signal():
    model = Sequential()
    model.add(Conv2D(73, (5,5), strides = (2,2), activation = 'relu',
                     padding = 'same', input_shape = (145,5,5),
                     kernel_initializer = 'he_normal',
                     bias_initializer = 'zeros'))
    model.add(Conv2D(73, (5,5), strides = (2,2), activation = 'relu',
                     padding = 'same', kernel_initializer = 'he_normal',
                     bias_initializer = 'zeros'))
    model.add(Flatten())
    model.add(Dense(2, activation = 'sigmoid',
                    kernel_initializer = 'glorot_uniform',
                    bias_initializer = 'zeros'))
    model.compile(loss = 'categorical_crossentropy',
                  optimizer = 'adam',
                  metrics = ['accuracy'])
    return model
def model_2_signal():
    model = Sequential()
    model.add(Conv2D(73, (5,5), activation = 'relu',
                     padding = 'same', input_shape = (145,5,5)))
    model.add(Dropout(0.2))
    model.add(Conv2D(73, (5,5), strides = (2,2), activation = 'relu',
                     padding = 'same'))
    model.add(Dropout(0.2))
    model.add(Conv2D(73, (5,5), strides = (2,2), activation = 'relu',
                     padding = 'same'))
    model.add(Dropout(0.2))
    model.add(Conv2D(73, (5,5), strides = (2,2), activation = 'relu',
                     padding = 'same'))
    model.add(MaxPooling2D(pool_size = (1,1), strides = 3))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(64, activation = 'relu'))
    model.add(Dropout(0.2))
    model.add(Dense(64, activation = 'relu'))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation = 'sigmoid'))
    model.compile(loss = 'categorical_crossentropy',
                  optimizer = 'adam',
                  metrics = ['accuracy'])
    return model
EPOCHS = 300
BATCH_SIZE = 15
train_X, val_X, train_y, val_y, train_date, val_date = train_test_split(bullish_episodes_img,y,date,test_size = 0.13, shuffle = False)
val_X, test_X, val_y, test_y, val_date, test_date = train_test_split(val_X, val_y, val_date, test_size = 0.38, shuffle = False)
model_1 = model_1_signal()
model_1.fit(train_X, train_y, validation_data = (val_X, val_y),
            epochs = EPOCHS, batch_size = BATCH_SIZE, verbose = 2,
            shuffle = True)
yhat_model1 = model_1.predict(test_X)
yhat_model1 = np.where(yhat_model1 >= 0.5, 1, 0)
df1 = pd.DataFrame({'signal':yhat_model1[:,1],'test':test_y[:,1],'time':test_date})
df1 = df1.sort_values(by='time')
#model_1.save('/home/f320x/Documents/AT/Py (1)/final_project/new_standard/spyder/spyder/EURCAD/signal_model/signal_model1')
model_2 = model_2_signal()
model_2.fit(train_X, train_y, validation_data = (val_X, val_y),
            epochs = EPOCHS, batch_size = BATCH_SIZE, verbose = 2,
            shuffle = True)
yhat_model2 = model_2.predict(test_X)
yhat_model2 = np.where(yhat_model2 >= 0.5, 1, 0)
df2 = pd.DataFrame({'signal':yhat_model2[:,1],'test':test_y[:,1],'time':test_date})
df2 = df2.sort_values(by='time')
Before transforming the data to MarkovTransitionField, I scaled all values between 0 and 1, and there are no NaNs in the dataset.
Does somebody have a hint?
I am using a Keras CNN for handwritten digit recognition. I downloaded the dataset from Kaggle. The preprocessing is:
train = pd.read_csv("./input/train.csv")
test = pd.read_csv("./input/test.csv")
Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1)
X_train = X_train / 255.0
test = test / 255.0
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
Y_train = to_categorical(Y_train, num_classes = 10)
random_seed = 2
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed)
My model looks like:
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size = (5,5), padding = 'Same',
                 activation = 'relu', input_shape = (28,28,1)))
model.add(Conv2D(filters = 32, kernel_size = (5,5), padding = 'Same',
                 activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size = (3,3), padding = 'Same',
                 activation = 'relu'))
model.add(Conv2D(filters = 64, kernel_size = (3,3), padding = 'Same',
                 activation = 'relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer = optimizer, loss = "categorical_crossentropy", metrics=["accuracy"])
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
epochs = 1
batch_size = 86
datagen = ImageDataGenerator(
    featurewise_center=False,             # set input mean to 0 over the dataset
    samplewise_center=False,              # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,   # divide each input by its std
    zca_whitening=False,                  # apply ZCA whitening
    rotation_range=10,                    # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range=0.1,                       # randomly zoom image
    width_shift_range=0.1,                # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,               # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,                # randomly flip images
    vertical_flip=False)                  # randomly flip images
datagen.fit(X_train)
history = model.fit(datagen.flow(X_train, Y_train, batch_size=batch_size),
                    epochs = epochs,
                    validation_data = (X_val, Y_val),
                    verbose = 2,
                    steps_per_epoch = X_train.shape[0] // batch_size,
                    callbacks = [learning_rate_reduction])
Predicting my input:
def predict(image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.resize(image, (28, 28))
    image = image.astype('float32')
    image = image.reshape(1, 28, 28, 1)
    image /= 255
    model = load_model('./model.h5')
    pred = model.predict(image, batch_size=1)
    print("Predicted Number: ", pred.argmax())

predict(cv2.imread('./testImages/1.png'))
What am I doing wrong?
The desired result is the digit shown in the input image; instead I get the same output (i.e. digit 8) for every input.
I used a CNN model with Keras for binary image classification. For the final prediction part, I defined the function below to output the prediction result:
model = keras.Sequential()
model.add(Conv2D(filters = 64, kernel_size = (3, 3), activation = 'relu', input_shape = (256, 256, 3)))
model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
model.add(Conv2D(filters = 128, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
model.add(Conv2D(filters = 256, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))
model.add(Flatten())
model.add(Dense(units = 512, activation = 'relu'))
model.add(Dense(units = 1, activation = 'sigmoid'))
model.compile(optimizer = 'adam',
              loss = tf.keras.losses.BinaryCrossentropy(),
              metrics = ['accuracy'])
history = model.fit(
    train_ds,
    validation_data = valid_ds,
    epochs = 10)

def testing_image(image_directory):
    test_image = image.load_img(image_directory, target_size = (256, 256))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis = 0)
    result = model.predict(test_image)
    print(result)

testing_image('/content/drive/MyDrive/testing/01.jpg')
The output is:
[[0.4733843]]
The output is always a decimal number, but I want the result to be just 0 or 1, without the array representation.
Any help is appreciated.
The sigmoid activation function returns values between 0 and 1, where values below 0.5 map to category zero (0) and values above 0.5 map to category one (1) in binary classification.
To get these binary labels, you need to add one more line of code in testing_image(), as below:
Fixed code:
def testing_image(image_directory):
    test_image = image.load_img(image_directory, target_size = (256, 256))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis = 0)
    # Changes in code
    pred = model.predict(test_image)
    result = np.where(pred > 0.5, 1, 0)  # <-- to get the binary category
    print(result)

testing_image('/content/drive/MyDrive/testing/01.jpg')
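If you also want to drop the array representation and print a plain 0 or 1, one option (a small addition, not part of the original answer) is to pull the single value out of the result array:

label = int(result[0][0])  # result has shape (1, 1), so this gives a plain 0 or 1
print(label)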
When I run these lines of code for binary classification, they run well without any problem and give a good result, but when I try to adapt them for more classes, e.g. 3 classes, the predict result is "NaN".
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 3, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('data/train',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('data/test',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'categorical')
classifier.fit_generator(training_set,
                         steps_per_epoch = 240,
                         epochs = 25,
                         validation_data = test_set,
                         validation_steps = 30)
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('2.jpeg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
I tried these lines of code with the 'binary_crossentropy' loss function and 2 classes; it worked well without any problems and gave a good result that helped me with my work, with an accuracy of approximately 93%.
But my project is based on multi-class classification, so I changed the loss function to 'categorical_crossentropy' and the class_mode in flow_from_directory to 'categorical' to make it multi-class; now the accuracy starts at 60%, grows up to 99%, and then suddenly drops to 33%.
The expected result is the labels of the classes; the actual result is "NaN".
Thanks in advance.
For multi-class classification, softmax is usually applied on the last dense layer instead of sigmoid. Change it to softmax and see whether the issue is still there.
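A minimal sketch of that change, using the layers from the model above:

# softmax makes the three outputs a probability distribution,
# which is what categorical_crossentropy expects
classifier.add(Dense(units = 3, activation = 'softmax'))
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

The predicted class index for a test image is then result.argmax(axis = 1), which you can map back to a label with training_set.class_indices.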
I have written code in Python for image classification, but I get an error when running it. The images in the dataset vary in size (minimum 655x53), and my task is to classify the images of cells into 14 categories. There are 6889 images in the training set and 3771 images in the test/validation set.
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_dir = '/vista/train1'
validation_dir = '/vista/test1'
train_data = ImageDataGenerator(rescale = 1/255,
                                rotation_range = 20,
                                width_shift_range = 0.2,
                                height_shift_range = 0.2,
                                zoom_range = 0.2,
                                horizontal_flip = True,
                                fill_mode = 'nearest')
validation_data = ImageDataGenerator(rescale = 1/255)
train_gen = train_data.flow_from_directory(train_dir,
                                           batch_size = 30,
                                           class_mode = 'categorical',
                                           target_size = (400, 200))
validation_gen = validation_data.flow_from_directory(validation_dir,
                                                     batch_size = 30,
                                                     class_mode = 'categorical',
                                                     target_size = (400, 200))
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64, (3,3), activation = 'relu', input_shape = (400, 200, 3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation = 'relu'),
    tf.keras.layers.Dropout(0.20),
    tf.keras.layers.Dense(14, activation = 'softmax')
])
model.compile(optimizer = 'Adam',
              loss = 'sparse_categorical_crossentropy',
              metrics = ['acc'])
df = int(3711/30)
ef = int(6889/30)
history = model.fit_generator(train_gen,
                              steps_per_epoch = df,
                              epochs = 20,
                              validation_data = validation_gen,
                              validation_steps = ef,
                              verbose = 2)
model.save_weights('/vista',overwrite=True,save_format = '.h5')