Keras FER-2013 model predict for a single image - tensorflow

I'm pretty new to machine learning. I followed a tutorial to classify whether the user is smiling or not, and I created this code:
def get_model(input_size, classes=7):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=input_size))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(2, 2))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.01)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))
    # Compiling the model
    model.compile(optimizer=Adam(lr=0.0001, decay=1e-6),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
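(For context, a minimal way to instantiate it; the call itself is not in the original post, and the shape assumes FER-2013's standard 48×48 grayscale images.)

    model = get_model(input_size=(48, 48, 1))  # FER-2013: 48x48 pixels, single channel
    model.summary()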
If I predict on an array from flow_from_directory it works fine, but I would like to predict a single image using the following code:
final_image = cv2.imread('./tesimg.jpeg')
final_image = np.expand_dims(final_image, axis=0)
final_image = final_image/255.0
The problem is that I'm getting this error:
UnimplementedError: Graph execution error:
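A likely cause, offered here as a hedged guess rather than a confirmed diagnosis: cv2.imread returns a full-size, 3-channel BGR array, so after expand_dims the batch shape will not match a model built for FER-2013's 48×48 single-channel input, and predict fails inside the graph. A minimal preprocessing sketch under that assumption (model is the trained FER model):

    import cv2
    import numpy as np

    img = cv2.imread('./tesimg.jpeg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # 3-channel BGR -> 1-channel grayscale
    img = cv2.resize(img, (48, 48))              # match the training resolution
    img = img.astype('float32') / 255.0          # same scaling as training
    img = np.expand_dims(img, axis=-1)           # add channel axis -> (48, 48, 1)
    img = np.expand_dims(img, axis=0)            # add batch axis   -> (1, 48, 48, 1)
    prediction = model.predict(img)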

Related

Get VGG16 model to acceptable accuracy

I'm trying to get a VGG16 model to acceptable accuracy, but I can't get it above 0.3.
Here's the model:
def VGG16():
    model = Sequential()
    model.add(Conv2D(input_shape=(224,224,3), filters=64, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(filters=128, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=128, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2,2), strides=(2,2), name='vgg16'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='fc1'))
    model.add(Dense(4096, activation='relu', name='fc2'))
    model.add(Dense(9, activation='softmax', name='output'))
    return model
model = VGG16()
opt = SGD(learning_rate=1e-6, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
Some answers here suggested changing the number of neurons in the fully connected layers to 4096 (I originally used 256 and 128), using SGD instead of Adam, increasing the number of epochs (I tried 50, 100, and 200), and increasing the batch size (I tried 64 and 128), but I can't get the accuracy above 0.3, and it's usually around 0.2.
The parameters I used in the best run were:
fully connected neurons: 4096
optimizer: SGD
learning rate: 1e-6
epochs: 100
batch size: 128
Edit: the dataset used is https://www.kaggle.com/datasets/nodoubttome/skin-cancer9-classesisic
You did not show the data used for model training, but I suspect your model will be very prone to overfitting. You need to add some dropout layers and some regularization.
After your Flatten layer, add the following:
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
model.add(Dense(256, kernel_regularizer=regularizers.l2(l=0.016),
                activity_regularizer=regularizers.l1(0.006),
                bias_regularizer=regularizers.l1(0.006), activation='relu'))
model.add(Dropout(rate=.4, seed=123, name='dropout'))
model.add(Dense(9, activation='softmax', name='output'))
It would also be helpful if you provided the model's training data.
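For reference, a minimal runnable version of that suggested head; the imports and the compile line are additions assumed here, not part of the original answer, and model is the asker's Sequential model up to and including Flatten():

    from keras import regularizers
    from keras.layers import BatchNormalization, Dense, Dropout

    # Regularized head replacing the two 4096-unit fully connected layers.
    model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
    model.add(Dense(256,
                    kernel_regularizer=regularizers.l2(0.016),
                    activity_regularizer=regularizers.l1(0.006),
                    bias_regularizer=regularizers.l1(0.006),
                    activation='relu'))
    model.add(Dropout(rate=0.4, seed=123, name='dropout'))
    model.add(Dense(9, activation='softmax', name='output'))

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])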

Why is testing accuracy marginally higher than the training accuracy?

I am running a 3-class classification problem with around 7,200 total images using a CNN. I used an 80:20 split, and the accuracy and loss curves are attached along with the code.
Can someone explain why the validation accuracy is higher than the training accuracy, and similarly why the training loss is higher than the validation loss? Although the difference is marginal (around 1%), I am not sure if it's acceptable or if I need to improve the model.
Here is the model I am running, with batch_size = 64:
# define cnn model
def define_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(64, 64, 3)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(3, activation='softmax'))
    # compile model
    opt = SGD(lr=0.001, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
I tried various other models, but their accuracy and loss curves didn't look good. This one seems good, but I was expecting the training accuracy to be higher than the validation accuracy.
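A quick sanity check (a generic suggestion with hypothetical variable names, not from the original thread): dropout and batch normalization behave differently at training and inference time, and Keras reports training accuracy as a running average over each epoch, so a marginal val > train gap is common. Evaluating the final model on both splits in inference mode compares like with like:

    # evaluate() runs in inference mode (dropout off, BatchNorm frozen),
    # so this measures both splits under identical conditions.
    # x_train/y_train and x_test/y_test are the 80:20 split (hypothetical names).
    _, train_acc = model.evaluate(x_train, y_train, verbose=0)
    _, test_acc = model.evaluate(x_test, y_test, verbose=0)
    print('train: %.3f, test: %.3f' % (train_acc, test_acc))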

Conv2D is incompatible with the layer in a GAN

I am developing a GAN using the MNIST dataset. I have developed the Generator and Discriminator. However, when I combine them I get this error: Input 0 of layer "conv2d" is incompatible with the layer: expected axis -1 of input shape to have value 1, but received input with shape (None, 57, 57, 1024). Does anyone know why this happens? Do I have to add something else?
The preprocessing:
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(60000, 28, 28, 1)
x_test = x_test.reshape(10000, 28, 28, 1)
x_train = x_train.astype('float32')/255
x_test = x_test.astype('float32')/255
img_rows, img_cols = 28, 28
channels = 1
img_shape = (img_rows, img_cols, channels)
The Generator:
def generator():
    model = Sequential()
    model.add(Conv2DTranspose(32, (3,3), strides=(2,2), activation='relu', use_bias=False,
                              input_shape=img_shape))
    model.add(BatchNormalization(momentum=0.3))
    model.add(Conv2DTranspose(64, (3,3), strides=(2,2), activation='relu', padding='same',
                              use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(64, (3,3), strides=(2,2), activation='relu', padding='same',
                              use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(BatchNormalization(momentum=0.3))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512, activation=LeakyReLU(alpha=0.2)))
    model.add(BatchNormalization(momentum=0.7))
    model.add(Dense(1024, activation='tanh'))
    model.summary()
    model.compile(loss=keras.losses.binary_crossentropy, optimizer=Adam(learning_rate=0.02))
    return model

generator = generator()
The Discriminator:
def discriminator():
    model = Sequential()
    model.add(Conv2D(32, (5,5), strides=(2,2), activation='relu', use_bias=False,
                     input_shape=img_shape))
    model.add(BatchNormalization(momentum=0.3))
    model.add(Conv2D(64, (5,5), strides=(2,2), activation='relu', padding='same',
                     use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(64, (5,5), strides=(2,2), activation='relu', padding='same',
                     use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(BatchNormalization(momentum=0.3))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512, activation=LeakyReLU(alpha=0.2)))
    model.add(BatchNormalization(momentum=0.7))
    model.add(Dense(1024, activation='tanh'))
    model.summary()
    model.compile(loss=keras.losses.binary_crossentropy, optimizer=Adam(learning_rate=0.02))
    return model

discriminator = discriminator()
Both models combined (where I get the error):
def GAN(generator, discriminator):
    model = Sequential()
    model.add(generator)
    discriminator.trainable = False
    model.add(discriminator)
    model.summary()
    model.compile()
    return model

gan = GAN(generator, discriminator)
Your generator needs to produce images, so its output shape must be the same as the image shape. The activation must also be compatible with the pixel range: your images are scaled to [0, 1], not -1 to +1, so you should not use "tanh". You must choose an activation compatible with the images.
Last generator layer:
Dense(img_shape[-1], ...)
Your discriminator needs to say whether the images are real or fake, so its output must be a single value between 0 and 1.
Last discriminator layer:
Dense(1, activation="sigmoid")
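To make those shapes concrete, here is a minimal sketch of that advice as a standard DCGAN-style layout. This is not the poster's architecture; the latent size of 100 is an assumption. Since the images here are scaled to [0, 1], the generator ends in a sigmoid:

    from keras.models import Sequential
    from keras.layers import Dense, Reshape, Flatten, Conv2D, Conv2DTranspose, LeakyReLU

    latent_dim = 100  # assumed size of the generator's input noise vector

    # Generator: latent vector -> 28x28x1 image with pixels in [0, 1]
    generator = Sequential([
        Dense(7 * 7 * 128, input_dim=latent_dim),
        LeakyReLU(alpha=0.2),
        Reshape((7, 7, 128)),
        Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same'),  # -> 14x14
        LeakyReLU(alpha=0.2),
        Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same'),  # -> 28x28
        LeakyReLU(alpha=0.2),
        Conv2D(1, (3, 3), padding='same', activation='sigmoid'),      # -> 28x28x1
    ])

    # Discriminator: 28x28x1 image -> single real/fake probability
    discriminator = Sequential([
        Conv2D(32, (3, 3), strides=(2, 2), padding='same', input_shape=(28, 28, 1)),
        LeakyReLU(alpha=0.2),
        Conv2D(64, (3, 3), strides=(2, 2), padding='same'),
        LeakyReLU(alpha=0.2),
        Flatten(),
        Dense(1, activation='sigmoid'),
    ])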

Failed to get convolution algorithm. This is probably because cuDNN failed to initialize

I am trying to learn the VGG16 model, but now I get an error like this:
Using TensorFlow backend.
UnknownError: Failed to get convolution algorithm. This is probably
because cuDNN failed to initialize, so try looking to see if a warning
log message was printed above. [[{{node conv2d_1/convolution}} =
Conv2D[T=DT_FLOAT, data_format="NCHW", dilations=[1, 1, 1, 1],
padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true,
_device="/job:localhost/replica:0/task:0/device:GPU:0"](conv2d_1/convolution-0-TransposeNHWCToNCHW-LayoutOptimizer,
conv2d_1/kernel/read)]] [[{{node dense_3/Softmax/_211}} =
_Recvclient_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0",
send_device="/job:localhost/replica:0/task:0/device:GPU:0",
send_device_incarnation=1, tensor_name="edge_237_dense_3/Softmax",
tensor_type=DT_FLOAT,
_device="/job:localhost/replica:0/task:0/device:CPU:0"]]
Here are my system versions:
Windows 10
TensorFlow 1.10.0
Python 3.6.7
Keras 2.2.4 (using the TensorFlow backend)
NVIDIA GeForce GTX 1050 Ti
cuDNN and CUDA:
nvcc: NVIDIA (R) Cuda compiler driver, Copyright (c) 2005-2017 NVIDIA Corporation
Built on Fri_Sep__1_21:08:32_Central_Daylight_Time_2017
Cuda compilation tools, release 9.0, V9.0.176
If you need the code:
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import cv2, numpy as np
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1), input_shape=(224, 224, 3), data_format='channels_last'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), data_format='channels_last'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), data_format='channels_last'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), data_format='channels_last'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(512, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(512, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(512, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), data_format='channels_last'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(512, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(512, kernel_size=(3, 3), activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(512, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), data_format='channels_last'))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
if __name__ == "__main__":
    from keras.applications.vgg16 import decode_predictions
    im = cv2.resize(cv2.imread('karisik_meyveler.jpg'), (224, 224)).astype(np.float32)
    im[:,:,0] -= 103.939
    im[:,:,1] -= 116.779
    im[:,:,2] -= 123.68
    im = im.transpose((1,0,2))
    im = np.expand_dims(im, axis=0)
    # Test pretrained model
    model = VGG_16('vgg16_weights_tf_dim_ordering_tf_kernels.h5')
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    out = model.predict(im)
    predictions = decode_predictions(out)
The error pops up:
UnknownError Traceback (most recent call last)
<ipython-input-1-9b64406a16ce> in <module>()
69 sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
70 model.compile(optimizer=sgd, loss='categorical_crossentropy')
---> 71 out = model.predict(im)
72 predictions = decode_predictions(out)
Solution: check for updates to the NVIDIA drivers and install them.
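After updating, a quick generic check (not part of the original answer) that TensorFlow 1.x can actually see and initialize the GPU:

    import tensorflow as tf
    from tensorflow.python.client import device_lib

    # A healthy CUDA/cuDNN install lists a /device:GPU:0 entry here.
    print(device_lib.list_local_devices())
    print(tf.test.is_gpu_available())  # True once the driver/cuDNN setup works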

Conv neural network to tell standard 52-card deck apart

I'm using the Keras model below to train a neural network to tell apart the 52 cards of a standard deck: ranks 23456789TJQKA, each in Clubs, Diamonds, Hearts, and Spades.
The model works quite well but occasionally has problems telling Clubs and Diamonds apart, as they are the most similar (and the difference is quite granular). I was wondering if anybody has suggestions on how I can improve the model below.
I've tried different things, like converting everything to black and white, grayscale, smoothing, augmentation, etc., but nothing seems to solve that problem.
The pictures are all 15x50 pixels with 1 channel, so the input shape is (15, 50, 1).
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.constraints import maxnorm

input_shape = (15, 50, 1)  # 15x50-pixel, single-channel card images
num_classes = 52           # one class per card

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape, activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
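For completeness, a minimal sketch of compiling and training the model above; the data variables and epoch count are hypothetical, assuming one-hot encoded labels:

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    # x_train: float arrays of shape (n, 15, 50, 1); y_train: one-hot labels of shape (n, 52)
    model.fit(x_train, y_train, batch_size=64, epochs=30, validation_split=0.1)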