I am running out of 25 GB RAM on Google Colab - TensorFlow

I am building a multilabel classification model using 8000 x-ray images; my code is below. Most of the RAM is used up while loading the images themselves, and only 10 epochs are able to run.
Can someone tell me what changes I need to make to this code for it to run and generate the model?
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
# initialize the initial learning rate, number of epochs to train for,
# and batch size
INIT_LR = 1e-3
EPOCHS = 40
BS = 66
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class labels
print("[INFO] loading images...")
imagePaths = list(paths.list_images('/content/drive/My Drive/testset/'))
data = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the directory name
    label = imagePath.split(os.path.sep)[-2]
    # load the image, swap color channels, and resize it to a fixed
    # 224x224 pixels while ignoring aspect ratio
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
# convert the data and labels to NumPy arrays while scaling the pixel
# intensities to the range [0, 1]
data = np.array(data) / 255.0
labels = np.array(labels)
# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)
# initialize the training data augmentation object
trainAug = ImageDataGenerator(
    rotation_range=15,
    fill_mode="nearest")
# load the VGG16 network, ensuring the head FC layer sets are left
# off
baseModel = VGG16(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))
# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(64, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(3, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
# compile our model
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# train the head of the network
print("[INFO] training head...")
H = model.fit(
    trainAug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs,
    target_names=lb.classes_))
# compute the confusion matrix and use it to derive the raw
# accuracy, sensitivity, and specificity
cm = confusion_matrix(testY.argmax(axis=1), predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
# show the confusion matrix, accuracy, sensitivity, and specificity
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc",color='green')
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on COVID-19 Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig('/content/drive/My Drive/setcovid/plot2.png')
# serialize the model to disk
print("[INFO] saving COVID-19 detector model...")
model.save('/content/drive/My Drive/setcovid/model2', save_format="h5")

You can generate TFRecords from this data, store them in your Drive, and then feed them to your model in batches instead of loading everything into memory at once. I would recommend the Hvass Laboratories YouTube channel's TensorFlow tutorials playlist, tutorial number 18 (TFRecords and Dataset API), as well as TensorFlow's official Dataset API documentation.
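For reference, here is a minimal sketch of the Dataset API route (without the TFRecord step) that decodes each image on the fly instead of holding all 8000 arrays in RAM. trainPaths and trainLabels are hypothetical names for the split path strings and one-hot labels from the question's code:
import tensorflow as tf

def load_image(path, label):
    # read and decode one file per example instead of keeping everything in RAM
    raw = tf.io.read_file(path)
    image = tf.image.decode_jpeg(raw, channels=3)  # assumes JPEG files; use decode_png for PNGs
    image = tf.image.resize(image, (224, 224)) / 255.0
    return image, label

train_ds = (tf.data.Dataset.from_tensor_slices((trainPaths, trainLabels))
            .shuffle(1024)
            .map(load_image, num_parallel_calls=tf.data.AUTOTUNE)
            .batch(BS)
            .prefetch(tf.data.AUTOTUNE))

# a validation Dataset can be built the same way and passed via validation_data
H = model.fit(train_ds, epochs=EPOCHS)
TFRecords, as suggested above, add a serialization step on top of the same pipeline, and are worth it when reading many small files from Drive becomes the bottleneck.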

Related

Getting ValueError while creating RNN model

I'm getting this error: ValueError: logits and labels must have the same shape, received ((32, 1) vs (32, 23740))
This is my full code:
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
import re
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import pad_sequences
# All data processing stuff
data = pd.read_csv('test.csv') # Load the data
# convert Sentiment type to string
data['Sentiment'] = data['Sentiment'].astype(str)
# remove special characters and convert to lowercase
data["Tweet"] = data["Tweet"].apply(lambda x: x.lower()) # Convert all tweets to lowercase
data["Tweet"] = data["Tweet"].apply(lambda x: x.replace("[^a-zA-Z0-9]", "")) # Remove special characters
data["Sentiment"] = data["Sentiment"].apply(lambda x: x.lower())
# Initialize the Tokenizer
tokenizer = Tokenizer()
# Fit the Tokenizer on the text data
tokenizer.fit_on_texts(data["Tweet"] + data["Sentiment"])
token_sequences1 = tokenizer.texts_to_matrix(data["Tweet"])
token_sequences2 = tokenizer.texts_to_matrix(data["Sentiment"])
padded_sequences1 = pad_sequences(token_sequences1)
padded_sequences2 = pad_sequences(token_sequences2)
# split the data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(
    padded_sequences1, padded_sequences2, test_size=0.2, random_state=42)
# create the RNN model
model = Sequential()
# 1000 is the number of words in the vocabulary, 128 is the dimension of the embedding vector,
model.add(Embedding(1000, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
# compile the model with a specified loss function and optimizer
# binary_crossentropy is used for binary classification problems like this one (positive or negative sentiment)
# accuracy is the metric used to evaluate the model performance (the percentage of correct predictions)
# the loss function and the optimizer can be changed to see if the model performance improves or not
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# train the model on the training data and put verbose=1 to see the training progress
model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1)
# evaluate the model on the testing data
y_pred = model.predict(x_test)
# calculate the evaluation metrics
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
# print the evaluation metrics
print("Accuracy: {:.2f}".format(accuracy))
print("Precision: {:.2f}".format(precision))
print("Recall: {:.2f}".format(recall))
print("F1 Score: {:.2f}".format(f1))
I'm actually trying to create a sentiment analyser for news-headline tweets from this data; here is a sample image of how my data looks in CSV.
Please provide a solution; I have tried every solution I could find.

Why is the accuracy different on my PC from Kaggle when using the same code?

I am writing a watermark detection algorithm, and I've tried code from Kaggle that fine-tunes a ResNet. But when I run the same code in a Jupyter notebook, I get 50% accuracy, while the sample code on Kaggle reaches around 97%. I don't have a GPU installed on my PC, and I changed the batch size to 32. Do you know why I get roughly 40% lower accuracy?
My Code:
import tensorflow as tf
import numpy as np
import os
from pathlib import Path
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import RMSprop
from PIL import Image
basedir = "/home/mahsa/Kaggle/archive/wm-nowm"
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
traindir = os.path.join(basedir,'train') # root for training
validdir = os.path.join(basedir,'valid') # root for testing
traingenerator = ImageDataGenerator(rescale=1./255)
validgenerator = ImageDataGenerator(rescale=1./255)
train_data = traingenerator.flow_from_directory(traindir,target_size=(150,150),batch_size=100,class_mode="binary")
valid_data = validgenerator.flow_from_directory(validdir,target_size=(150,150),batch_size=100,class_mode="binary")
existing = tf.keras.applications.InceptionResNetV2(input_shape=(150, 150, 3), include_top=False, pooling='max', weights='imagenet')
#for layer in existing.layers:
# layer.trainable = False
#existing.summary()
last = existing.get_layer("mixed_7a")
last_output = last.output
# Flatten the output layer to 1 dimension
x = tf.keras.layers.Flatten()(last_output)
x = tf.keras.layers.Dropout(0.25)(x)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = tf.keras.layers.Dense(128, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
# Add a final sigmoid layer for classification
x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model( existing.input, x)
model.compile(optimizer=RMSprop(lr=0.001),
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_data,
                    validation_data=valid_data,
                    steps_per_epoch=150,
                    epochs=60,
                    validation_steps=50,
                    verbose=2)
I found out that the problem was the batch size: I increased it to 100, and although it took about 1.5 days to train the model, I got 99% accuracy.

How to feed grayscale images into pretrained neural network models?

Noob here. I have been working with grayscale images for mitosis classification; here's a sample image I'm working with. I have employed VGGNet for this, but I have some doubts about how my grayscale images should be fed into the neural network; the VGGNet documentation says it was trained on colored ImageNet images.
I read the images using cv2.imread() and squeezed them into an array, and found the shape to be (227, 227, 3). Shouldn't it be (227, 227, 1) when I'm handling grayscale images? Model accuracy was also only 50%. I'm wondering if something is wrong with the dataset itself, or whether VGGNet isn't suitable for this purpose. Or should I use some other method to read these images to get grayscale images?
I have tried the solutions listed in similar questions. Am I reading the images in the right way?
I'm sharing my code here.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Display Image data
from PIL import Image
import cv2
from google.colab import drive
drive.mount('/content/drive')
mitotic_image = Image.open('/content/drive/MyDrive/Medical/actualmito1/trainactmito01.jpg')
nonmitotic_image=Image.open('/content/drive/MyDrive/Medical/actualnonmito1/trainactnonmito01.jpg')
# subplotting image data
fig = plt.figure(figsize=(15,9))
ax1 = fig.add_subplot(1, 2, 1)
img_plot = plt.imshow(nonmitotic_image, cmap = plt.cm.bone)
ax1.set_title("Non-Mitotic Image")
ax2 = fig.add_subplot(1, 2, 2)
img_plot = plt.imshow(mitotic_image, cmap = plt.cm.bone)
ax2.set_title("Mitotic Image")
plt.show()
import os
yes = os.listdir("/content/drive/MyDrive/Medical/actualmito1")
no = os.listdir("/content/drive/MyDrive/Medical/actualnonmito1")
data = np.concatenate([yes, no])
target_yes = np.full(len(yes), 1)
target_no = np.full(len(no), 0)
# Image Target
data_target = np.concatenate([target_yes, target_no])
# Generate Image Data
img = cv2.imread("/content/drive/MyDrive/Medical/actualmito1/trainactmito01.jpg")
mitosis = cv2.resize(img,(32,32))
plt.imshow(mitosis)
X_data = []
yes = os.listdir("/content/drive/MyDrive/Medical/actualmito1")
for file in yes:
    img = cv2.imread("/content/drive/MyDrive/Medical/actualmito1/" + file)
    # resizing image data to 224x224
    img = cv2.resize(img, (224,224))
    X_data.append(img)  # store every image array in the list
no = os.listdir("/content/drive/MyDrive/Medical/actualnonmito1")
for file in no:
    img = cv2.imread("/content/drive/MyDrive/Medical/actualnonmito1/" + file)
    # resizing image data to 224x224
    img = cv2.resize(img, (224,224))
    X_data.append(img)  # store every image array in the list
X = np.squeeze(X_data)
X.shape
# Image Pixel Normalization
X = X.astype('float32')
X /= 255
X.shape
# Train & Test Data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, data_target, test_size = 0.1,
                                                    random_state = 3)
x_train2, x_val, y_train2, y_val = train_test_split(x_train, y_train, test_size = 0.15,
                                                    random_state = 3)
# VGG16 - Transfer Learning
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import (Conv2D, GlobalAveragePooling2D, Flatten,
                                     ZeroPadding2D, Dropout, BatchNormalization)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import VGG16
def build_model():
    # use ImageNet pre-trained weights (transfer learning)
    # input shape = 224x224x3
    vgg16_model = VGG16(weights = 'imagenet', include_top = False, input_shape=(224,224,3))
    # Input Layer
    model = Sequential()
    # padding = 'same' is equivalent to zero padding
    model.add(Conv2D(filters=3, kernel_size=(3,3), padding='same', input_shape = (224,224,3)))
    # add the transfer learning model
    model.add(vgg16_model)
    # Average Pooling Layer
    model.add(GlobalAveragePooling2D())
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # Fully Connected Layer
    model.add(Dense(units = 512, activation='relu'))
    model.add(Dropout(0.5))
    # Output Layer
    model.add(Dense(units = 1, activation='sigmoid'))
    model.compile(optimizer = 'Adam', loss = 'binary_crossentropy', metrics = ['Accuracy'])
    return model
model = build_model()
model.summary()
from tensorflow.keras import callbacks
filepath = "/content/drive/MyDrive/BestModelMRI3.hdf5"
checkpoint = callbacks.ModelCheckpoint(filepath, monitor = 'val_loss', save_best_only = True,
                                       mode = 'min', verbose = 1)
import datetime
import keras
import os
logdir = os.path.join("/content/drive/MyDrive/MRI_logs",
                      datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = keras.callbacks.TensorBoard(logdir)
history = model.fit(x_train2, y_train2, epochs = 200, batch_size = 32, shuffle = True,
                    validation_data = (x_val, y_val), callbacks = [checkpoint, tensorboard_callback], verbose = 1)
model.load_weights("/content/drive/MyDrive/BestModelMRI3.hdf5")
model.evaluate(x_test, y_test)
predictions = model.predict(x_test)
yhat = np.round(predictions)
from sklearn.metrics import confusion_matrix, classification_report
confusion_matrix(y_test, yhat)
sns.heatmap(confusion_matrix(y_test, yhat), annot = True, cmap = 'RdPu')
print(classification_report(y_test, yhat))
The VGG model was trained on RGB images, while cv2 reads images in as BGR, so add
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
to convert BGR to RGB. VGG also expects the pixel values to be in the range -1 to +1, so scale with
X = X / 127.5 - 1
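On the shape question itself: cv2.imread() uses the IMREAD_COLOR flag by default, so even a grayscale file comes back with three identical channels, which is why you see (227, 227, 3) and why VGG16 accepts it. If you want to read the files as single-channel instead, a minimal sketch for stacking the plane back to three channels (file_path is a placeholder) would be:
import cv2
import numpy as np

# read as one channel, then replicate it so the array matches VGG16's (H, W, 3) input
img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)  # shape (H, W)
img = cv2.resize(img, (224, 224))
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)        # shape (224, 224, 3)

X = np.array([img], dtype="float32")
X = X / 127.5 - 1.0  # scale to [-1, +1] as suggested above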

I want to convert my binary classification model to a multiclass classification model; I am taking labels from directory names

This is my code below. It works fine for classifying two categories of images, taking labels from the directory names, but whenever I add one more directory it stops working; the error I get is posted below. Can someone help me solve the problem?
I have tried removing the NumPy array conversion. I read somewhere that I just need to pass the images through a CNN, but I couldn't get that to work.
I am trying to make a classifier, using frontal chest x-rays, for pneumonia caused by coronavirus versus other diseases.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
# initialize the initial learning rate, number of epochs to train for,
# and batch size
INIT_LR = 1e-3
EPOCHS = 40
BS = 66
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class labels
print("[INFO] loading images...")
imagePaths = list(paths.list_images('/content/drive/My Drive/testset/'))
data = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the directory name
    label = imagePath.split(os.path.sep)[-2]
    # load the image, swap color channels, and resize it to a fixed
    # 224x224 pixels while ignoring aspect ratio
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
# convert the data and labels to NumPy arrays while scaling the pixel
# intensities to the range [0, 1]
data = np.array(data) / 255.0
labels = np.array(labels)
# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)
# initialize the training data augmentation object
trainAug = ImageDataGenerator(
    rotation_range=15,
    fill_mode="nearest")
# load the VGG16 network, ensuring the head FC layer sets are left
# off
baseModel = VGG16(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))
# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(64, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
# compile our model
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
# train the head of the network
print("[INFO] training head...")
H = model.fit(
    trainAug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs,
    target_names=lb.classes_))
# compute the confusion matrix and use it to derive the raw
# accuracy, sensitivity, and specificity
cm = confusion_matrix(testY.argmax(axis=1), predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
# show the confusion matrix, accuracy, sensitivity, and specificity
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on COVID-19 Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.png")
# serialize the model to disk
print("[INFO] saving COVID-19 detector model...")
model.save('/content/drive/My Drive/setcovid/model.h5')
This is the error I got in my program
There are a few changes you need to make to get this working. The error you're getting is caused by the one-hot encoding: you're encoding your labels to one-hot twice.
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
Please remove the last line, to_categorical, from your code; LabelBinarizer already produces the one-hot encoding in the correct format. That will fix the error you're getting now.
There is another problem I must mention: your model's output layer has only 2 neurons, but you want to classify 3 classes. Set the output layer to 3 neurons.
headModel = Dense(3, activation="softmax")(headModel)
And since you're now training with 3 classes, the problem is no longer binary, so you have to use another loss. I recommend categorical cross-entropy:
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
You also forgot the following imports; add these too:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import *
And you're good to go.
By the way, I'm somewhat wary of the batch size (66) you're using. I don't know which GPU you have, but I would still suggest decreasing it.
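Putting the answer's three changes together, the affected lines of the question's code would read:
# one-hot encode once; LabelBinarizer already returns shape (n_samples, 3)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)  # no to_categorical afterwards

# three output neurons for three classes
headModel = Dense(3, activation="softmax")(headModel)

# categorical loss instead of binary
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])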

Keras: using a trained InceptionV3 model with CIFAR10 gives an error about batch size

I am new to machine learning, Keras, etc.
I am trying to use a trained model to increase accuracy. In my case I followed Jerry Kurata on Pluralsight, using InceptionV3 and only modifying the last layer to train it to recognize birds.
The dataset I have is Keras's built-in CIFAR10; here is the official tutorial.
Here is the error message:
F tensorflow/stream_executor/cuda/cuda_dnn.cc:516] Check failed:
cudnnSetTensorNdDescriptor(handle_.get(), elem_type, nd, dims.data(),
strides.data()) == CUDNN_STATUS_SUCCESS (3 vs. 0)batch_descriptor:
{count: 32 feature_map_count: 288 spatial: %d 0%d 0 value_min:
0.000000 value_max: 0.000000 layout: BatchDepthYX} Aborted (core dumped)
I saw one possible cause here:
The image samples in CIFAR10 (32x32) are too small, which causes this issue.
But I cannot figure out how to fix it.
Here is my code:
import matplotlib.pyplot as plt
import keras
from keras import backend as K
with K.tf.device("/device:GPU:0"):
    config = K.tf.ConfigProto(intra_op_parallelism_threads=4,
                              inter_op_parallelism_threads=4, allow_soft_placement=True,
                              device_count={'CPU': 1, 'GPU': 1})
    session = K.tf.Session(config=config)
    K.set_session(session)
from keras.callbacks import EarlyStopping
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.datasets import cifar10
# "/device:GPU:0"
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def create_generator():
    return ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode='nearest',
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)
Training_Epochs = 1
Batch_Size = 32
Number_FC_Neurons = 1024
Num_Classes = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, Num_Classes)
y_test = keras.utils.to_categorical(y_test, Num_Classes)
# load cifar10 data here https://keras.io/datasets/
datagen = create_generator()
datagen.fit(x_train)
Inceptionv3_model = InceptionV3(weights='imagenet', include_top=False)
print('Inception v3 model without last FC loaded')
x = Inceptionv3_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(Number_FC_Neurons, activation='relu')(x)
predictions = Dense(Num_Classes, activation='softmax')(x)
model = Model(inputs=Inceptionv3_model.input, outputs=predictions)
# print(model.summary())
print('\nFine tuning existing model')
Layers_To_Freeze = 172
for layer in model.layers[:Layers_To_Freeze]:
    layer.trainable = False
for layer in model.layers[Layers_To_Freeze:]:
    layer.trainable = True
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
cbk_early_stopping = EarlyStopping(monitor='val_acc', mode='max')
print(len(x_train))
history_transfer_learning = model.fit_generator(
    datagen.flow(x_train, y_train, batch_size=Batch_Size),
    epochs=Training_Epochs,
    validation_data=(x_test, y_test),
    workers=4,
    steps_per_epoch=len(x_train)//Batch_Size,
    callbacks=[cbk_early_stopping]
)
model.save('incepv3_transfer_cifar10.h5', overwrite=True, include_optimizer=True)
# Score trained model.
scores = model.evaluate(x_test, y_test, batch_size=12, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
Your error, as you said, comes from the input size difference: the pretrained ImageNet model expects a bigger image than CIFAR-10's (32, 32).
You need to specify the input_shape of the model beforehand, like this:
Inceptionv3_model = InceptionV3(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
For more explanation you can check this tutorial.
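One caveat, as an addition from my side rather than from the answer above: Keras's application models enforce a minimum input size of 75x75 for InceptionV3 when loading ImageNet weights, so (32, 32, 3) may be rejected with a ValueError. A common workaround is to upsample the CIFAR-10 images inside the model before the pretrained stack, sketched here:
from keras.layers import Input, UpSampling2D

# upsample 32x32 -> 96x96 so the input clears InceptionV3's 75-pixel minimum
inputs = Input(shape=(32, 32, 3))
upsampled = UpSampling2D(size=(3, 3))(inputs)
Inceptionv3_model = InceptionV3(weights='imagenet', include_top=False,
                                input_tensor=upsampled)
The head layers from the question (GlobalAveragePooling2D and the Dense layers) then attach on top as before.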