Unable to add fourth convolutional layer - tensorflow

I'm pretty new to machine learning, and while following a tutorial for a convolutional neural network I wanted to experiment on my own with increasing accuracy. However, when I tried to add another convolutional and pooling layer to my model, it raised an error. This is before I added the layers:
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(62))
And this is after:
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(62))
This is the error message it gave me:
ValueError: Negative dimension size caused by subtracting 3 from 1 for '{{node conv2d_36/Conv2D}} = Conv2D[T=DT_FLOAT, data_format="NHWC", dilations=[1, 1, 1, 1], explicit_paddings=[], padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true](max_pooling2d_26/MaxPool, conv2d_36/Conv2D/ReadVariableOp)' with input shapes: [?,1,1,64], [3,3,64,64].

This happens because you reduce the spatial dimensions too much inside your network: every 'valid' convolution and every pooling layer shrinks the feature map, and by the fourth convolution it is already 1x1, so a 3x3 kernel no longer fits. Use padding='same' in your convolutional layers to avoid this dimensionality error:
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same',
input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(62))
model.summary()
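To see why the original stack fails, you can trace the spatial size layer by layer. Here is a minimal sketch of the arithmetic (a 'valid' 3x3 convolution shrinks each side by 2, and 2x2 max pooling halves it, rounding down):

def conv_valid(n, k=3):
    return n - k + 1   # output size of a 'valid' convolution

def pool(n, p=2):
    return n // p      # output size of 2x2 max pooling, stride 2

n = 28
n = conv_valid(n)  # 26
n = pool(n)        # 13
n = conv_valid(n)  # 11
n = pool(n)        # 5
n = conv_valid(n)  # 3
n = pool(n)        # 1 -> another 3x3 'valid' convolution cannot fit

This matches the [?, 1, 1, 64] input shape reported in the error message.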

Related

ValueError: Shapes (None, 1) and (None, 5) are incompatible in keras

model = Sequential()
model.add(Conv2D(128, (3, 3), activation='relu', input_shape=(64, 64, 3), padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(5, activation = 'softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=[tf.keras.metrics.Recall()])
This code works fine with metrics=['accuracy'], but it raises ValueError: Shapes (None, 1) and (None, 5) are incompatible with metrics=[tf.keras.metrics.Recall()].
Please help me. Thanks in advance.
tf.keras.metrics.Recall is designed for binary classification. Your final layer has 5 nodes, which essentially means you have 5 classes, and with sparse integer labels the metric is handed shapes (None, 1) and (None, 5) that it cannot compare. You should change Recall to another metric; the documentation should help you choose an appropriate metric for your model. Categorical accuracy should be good enough to get started.
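As an illustrative sketch (my addition, not part of the original answer): you can either keep sparse integer labels and use a matching accuracy metric, or, if you really want Recall, one-hot encode the labels so the metric sees matching shapes:

# Option 1: keep sparse integer labels, use a compatible metric
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])

# Option 2 (assumes labels can be one-hot encoded, e.g. with
# tf.keras.utils.to_categorical(y_train, num_classes=5)): Recall then
# receives matching (None, 5) shapes and is computed across all entries
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[tf.keras.metrics.Recall()])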

Tensor Tensor("flatten/Reshape:0", shape=(?, 2622), dtype=float32) is not an element of this graph

Hello StackOverflow team:
I built a model based on the VGG-Face architecture (Vgg_Face_Model) and loaded the pretrained weights (vgg_face_weights.h5).
Note that I use tensorflow-gpu 2.1.0 and keras 2.3.1, with an Anaconda 3 environment as the interpreter in PyCharm.
The code raises an error at this line:
input_descriptor = [model.predict(face), img]
The code is:
def face_recognizer(face, db_descriptors):
    # face = cv2.imread(img)
    # face = cv2.resize(face, (IMG_Size, IMG_Size))
    t0 = time.perf_counter()
    face = np.array(face).reshape(-1, IMG_Size, IMG_Size, 3)
    ###### here error #################################
    input_descriptor = [model.predict(face), img]
    ###################################################
    K_nn_result = K_nn_Classifier(input_descriptor[0], db_descriptors, 5)
    input_result = Knn_Distance_Score(K_nn_result)
    if input_result[0] <= 10:
        identity = 'stranger'
    else:
        identity = input_result[1]
    # print('Done in', time.perf_counter() - t0)
    return input_result, identity

def PrepareModels(self):
    global mpFaceDetection, FaceDetector, model
    mpFaceDetection = mp.solutions.face_detection
    FaceDetector = mpFaceDetection.FaceDetection()
    model = loadModel()
The model is:
import os
from pathlib import Path
# from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dense, Dropout, \
    Activation
import gdown

# ---------------------------------------
def Vgg_Face_Model():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Convolution2D(4096, (7, 7), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation('softmax'))
    return model

def loadModel():
    model = Vgg_Face_Model()
    # -----------------------------------
    home = str(Path.home())
    if os.path.isfile(home + '/.deepface/weights/vgg_face_weights.h5') != True:
        print("vgg_face_weights.h5 will be downloaded...")
        url = 'https://drive.google.com/uc?id=1CPSeum3HpopfomUEK1gybeuIVoeJT_Eo'
        output = home + '/.deepface/weights/vgg_face_weights.h5'
        gdown.download(url, output, quiet=False)
    # -----------------------------------
    model.load_weights(home + '/.deepface/weights/vgg_face_weights.h5')
    # -----------------------------------
    # TO-DO: why? (This takes the output of the Flatten layer just before
    # the softmax, i.e. the 2622-d descriptor used for face matching.)
    vgg_model_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    return vgg_model_descriptor

# model = loadModel()
Output:
Tensor Tensor("flatten/Reshape:0", shape=(?, 2622), dtype=float32) is not an element of this graph.
import tensorflow as tf
from tensorflow.python.keras.backend import set_session

# Create one global session and graph at startup. On TensorFlow 2.x these
# TF1-style APIs live under tf.compat.v1.
sess = tf.compat.v1.Session()
graph = tf.compat.v1.get_default_graph()
set_session(sess)

# Now, where you are calling the model:
global sess
global graph
with graph.as_default():
    set_session(sess)
    input_descriptor = [model.predict(face), img]
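The underlying issue is that Keras registers the model's weights in whatever graph is current when the model is built; if model.predict is later called from a different thread or callback, a fresh default graph is in effect and the model's tensors are "not an element of this graph". Pinning one global session and graph, and re-entering them around every predict call, keeps both sides consistent.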

Change input size of vggface pretained model

It's been a week that I've been trying to change the input size of the pretrained VGG-Face model, and every time I change something I get an error. How can I change the input size from 224x224x3 to 64x64x3? Is there a way to change it directly, without retraining the model, just using the pretrained weights?
model.add(ZeroPadding2D((1, 1),include_top=False,input_shape=(64, 64, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Convolution2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
from keras.models import model_from_json
deep= model.load_weights('/Users/macbookpro/PycharmProjects/untitled/venv/vgg_face_weights.h5')
When I try to pass include_top I get this error:
TypeError: ('Keyword argument not understood:', 'include_top')
When I change the input size directly I get this error:
ValueError: Negative dimension size caused by subtracting 7 from 2 for 'conv2d_14/convolution' (op: 'Conv2D') with input shapes: [?,2,2,512], [7,7,512,4096].
First of all, remove the include_top=False: it is an argument of the keras.applications model constructors, not of a ZeroPadding2D layer, which is why you get the TypeError.
Your real problem is that this architecture is too deep for a 64x64 input: five 2x2 pooling layers reduce 64 down to 2. In particular, this line:
model.add(Convolution2D(4096, (7, 7), activation='relu'))
is trying to perform a 7x7 convolution on an input of size 2x2, which is impossible.
A possible solution is to remove the convolutions after the last MaxPooling layer and use some Dense layers instead.
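A minimal sketch of that suggestion, assuming the Vgg_Face_Model definition from the previous question is available (the slice index 31 covers exactly the layers up to and including the last MaxPooling2D; the new head sizes are arbitrary choices and start untrained):

from tensorflow.keras.layers import Input, Flatten, Dense, Dropout
from tensorflow.keras.models import Model

base = Vgg_Face_Model()                    # original 224x224 definition
base.load_weights('vgg_face_weights.h5')   # pretrained weights fit this model

# Convolution and pooling layers are size-agnostic, so the pretrained
# blocks can be reapplied to a 64x64 input.
inp = Input(shape=(64, 64, 3))
x = inp
for layer in base.layers[:31]:             # up to the last MaxPooling2D
    x = layer(x)
x = Flatten()(x)                           # 2x2x512 -> 2048
x = Dense(1024, activation='relu')(x)      # new, untrained head
x = Dropout(0.5)(x)
out = Dense(2622, activation='softmax')(x)
model_64 = Model(inp, out)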

Conv neural network to tell standard 52-card deck apart

I'm using the Keras model below to train a neural network to tell apart the 52 cards of a standard deck: the ranks 23456789TJQKA, each in Clubs, Diamonds, Hearts and Spades.
The model works quite well but occasionally has trouble telling Clubs and Diamonds apart, as they are the most similar (and the difference is quite granular). I was wondering if anybody has suggestions on how I can improve the model below?
I've tried different things, like converting everything to black and white, grayscale, smoothing, augmentation etc., but nothing seems to solve that problem.
The pictures are all 15x50 pixels with 1 channel, so the input shape is (15, 50, 1).
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape, activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

Feed CNN features to LSTM

I want to build an end-to-end trainable model with the following properties:
CNN to extract features from an image
The features are reshaped into a matrix
Each row of this matrix is fed to LSTM1
Each column of this matrix is fed to LSTM2
The outputs of LSTM1 and LSTM2 are concatenated for the final output
(it's more or less similar to Figure 2 in this paper: https://arxiv.org/pdf/1611.07890.pdf)
My problem now is: after the reshape, how can I feed the values of the feature matrix to the LSTMs with Keras or TensorFlow?
This is my code so far with VGG16 net (also a link to Keras issues):
# VGG16
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(224, 224, 3)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
# block 2
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
# block 3
model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
# block 4
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
# block 5
model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
# block 6
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dense(4096, activation='relu'))
# reshape the feature 4096 = 64 * 64
model.add(Reshape((64, 64)))
# How to feed each row of this to LSTM?
# This is my first solution but it doesn’t look correct:
# model.add(LSTM(256, input_shape=(64, 1))) # 256 hidden units, sequence length = 64, feature dim = 1
Consider building your CNN model with Conv2D and MaxPooling2D layers until you reach your Flatten layer, because the vectorized output of the Flatten layer will be the input data to the LSTM part of your structure.
So, build your CNN model like this:
model_cnn = Sequential()
model_cnn.add(Conv2D...)
model_cnn.add(MaxPooling2D...)
...
model_cnn.add(Flatten())
Now, this is an interesting point: the current version of Keras has some incompatibilities with certain TensorFlow structures that will not let you stack all of these layers in a single Sequential object.
So it's time to use the Keras Model object to complete your neural network with a trick:
input_lay = Input(shape=(None, ?, ?, ?)) #dimensions of your data
time_distribute = TimeDistributed(Lambda(lambda x: model_cnn(x)))(input_lay) # keras.layers.Lambda is essential to make our trick work :)
lstm_lay = LSTM(?)(time_distribute)
output_lay = Dense(?, activation='?')(lstm_lay)
And finally, now it's time to put together our 2 separated models:
model = Model(inputs=[input_lay], outputs=[output_lay])
model.compile(...)
Note that you can substitute your VGG (without the top layers) for my model_cnn example, since the vectorized output of the VGG Flatten layer will be the input of the LSTM part.
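To make the trick concrete, here is an illustrative, runnable version of the sketch above; every size (image dimensions, filter counts, LSTM units, number of classes) is a placeholder assumption, not a value from the original answer:

import tensorflow as tf
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, Flatten,
                                     TimeDistributed, Lambda, LSTM, Dense)
from tensorflow.keras.models import Model, Sequential

# A small stand-in CNN; replace it with your VGG up to the Flatten layer.
model_cnn = Sequential([
    Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(64, 64, 3)),
    MaxPooling2D((2, 2)),
    Flatten(),
])

# TimeDistributed(Lambda(...)) applies the whole CNN to every time step
# of a (time, height, width, channels) sequence.
input_lay = Input(shape=(None, 64, 64, 3))
time_distribute = TimeDistributed(Lambda(lambda x: model_cnn(x)))(input_lay)
lstm_lay = LSTM(256)(time_distribute)
output_lay = Dense(10, activation='softmax')(lstm_lay)

model = Model(inputs=[input_lay], outputs=[output_lay])
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()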