How to connect RNN at the end of a CNN to use to train video frames? [closed] - tensorflow

Closed. This question needs to be more focused. It is not currently accepting answers.
Closed 4 years ago.
I'm trying to classify a video the way image classification is done, i.e. using the individual frames as the input to the classifier, but I have no idea how to code it.
I'm using Inception-ResNet as my CNN, but I don't know anything about RNNs or how to use them.

This is ML_machine; here is what I wanted to show you.
This is an implementation of a CNN to classify the MNIST data; it is not mine and comes from here:
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
To convert this CNN followed by a fully connected layer into a CNN followed by an RNN, simply change the line
model.add(Dense(num_classes, activation='softmax'))
into
model.add(SimpleRNN(num_classes, activation='softmax'))
(of course you need to import it)
You might have to change the input dimensions of your network and/or wrap the whole CNN part in TimeDistributed; I had trouble with some versions of TensorFlow and not with others.
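For the actual video case, here is a minimal sketch of what I mean (not from the linked example; the number of frames, frame size, and layer widths are placeholder assumptions): the same small CNN is applied to every frame via TimeDistributed and the per-frame feature vectors are fed to an LSTM.
from keras.models import Sequential
from keras.layers import TimeDistributed, Conv2D, MaxPooling2D, Flatten, LSTM, Dense

num_frames, height, width, channels = 16, 64, 64, 3  # placeholder video dimensions
num_classes = 10

model = Sequential()
# the same CNN is applied to every frame; input is (frames, height, width, channels)
model.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu'),
                          input_shape=(num_frames, height, width, channels)))
model.add(TimeDistributed(MaxPooling2D((2, 2))))
model.add(TimeDistributed(Flatten()))
# the recurrent layer consumes one feature vector per frame
model.add(LSTM(128))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])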
EDIT:
I ran into some problems myself with the code I gave you; ending a CNN with a recurrent network is harder than I thought because of the dimensions. Here is how I managed to do it:
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=in_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
# NO MORE POOLING
model.add(Dropout(0.25))
# Reshape with the first argument being the number of filters in your last conv layer
model.add(Reshape((64, -1)))
# Permute so the RNN reads the tensor as (timesteps, features)
model.add(Permute((2, 1)))
# it can also be an LSTM
model.add(SimpleRNN(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
EDIT 2: a dummy example of a simple fully connected NN in Keras:
import numpy as np

trng_input = np.random.uniform(size=(1000, 4))
trng_output = np.column_stack([np.sin(trng_input).sum(axis=1), np.cos(trng_input).sum(axis=1)])
model = Sequential()
model.add(Dense(6, input_shape=trng_input.shape[1:], activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.compile(loss='MSE', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
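To actually train it, a fit call on the same arrays might look like this (a sketch; the epochs and batch size are arbitrary):
model.fit(trng_input, trng_output, epochs=10, batch_size=32, verbose=1)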

Related

CNN accuracy is not improving

I have a dataset of images (EEG spectrograms). Image dimensions are 669x1026. I am using the following code for binary classification of the spectrograms.
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 669, 1026
train_data_dir = '/home/spectrograms/train'
validation_data_dir = '/home/spectrograms/test'
nb_train_samples = 791
nb_validation_samples = 198
epochs = 100
batch_size = 3
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(128, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(16))
model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0,
    zoom_range=0,
    horizontal_flip=False)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
model.save_weights('CNN_model.h5')
But I am not able to get a training accuracy greater than 0.53. I have only a limited amount of data (790 training samples and 198 testing samples), so increasing the number of input images is not an option. What else can I do to improve the accuracy?
Your code
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0,
    zoom_range=0,
    horizontal_flip=False)
is not doing any image augmentation, only rescaling. I am not sure what type of augmentation may help; it looks like your images really do not rely on color. It probably will not help accuracy, but you could reduce computational expense by converting the images to grayscale. You might get some improvement by using the Keras callbacks ReduceLROnPlateau and EarlyStopping. Documentation is here. My suggested code for these callbacks is shown below.
import tensorflow as tf

rlronp = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=1,
                                               verbose=1, mode="auto", min_delta=0.0001,
                                               cooldown=0, min_lr=0)
estop = tf.keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=4,
                                         verbose=1, mode="auto", baseline=None,
                                         restore_best_weights=True)
callbacks = [rlronp, estop]
You can try using transfer learning. Most of those models are trained on the ImageNet dataset, which is dissimilar to the type of images you are using, but it might be worth a try. I recommend you use the MobileNet model. Code for that is shown below.
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adamax

base_model = tf.keras.applications.mobilenet.MobileNet(include_top=False,
                                                        input_shape=input_shape,
                                                        pooling='max',
                                                        weights='imagenet',
                                                        dropout=.4)
x = base_model.output
x = Dense(64, activation='relu')(x)
x = Dropout(.3, seed=123)(x)
output = Dense(1, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=output)
model.compile(Adamax(lr=.001), loss='binary_crossentropy', metrics=['accuracy'])
Use the callbacks referenced above in model.fit. You may get a warning that MobileNet was trained with an image shape of 224 x 224 x 3, but it should still load the imagenet weights and work.
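For example (a sketch reusing the generators and callbacks defined above; with recent tf.keras, model.fit accepts generators directly, while older standalone Keras would need fit_generator instead):
history = model.fit(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=callbacks)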

How to customise a CNN layers with TensorFlow 2, Feed new inputs at Dense Layers of CNN [duplicate]

I have 1D sequences which I want to use as input to a Keras VGG-style classification model, split into x_train and x_test. For each sequence, I also have custom features stored in feats_train and feats_test which I do not want to feed to the convolutional layers, but to the first fully connected layer.
A complete sample of train or test would thus consist of a 1D sequence plus n floating point features.
What is the best way to feed the custom features first to the fully connected layer? I thought about concatenating the input sequence and the custom features, but I do not know how to make them separate inside the model. Are there any other options?
The code without the custom features:
x_train, x_test, y_train, y_test, feats_train, feats_test = load_balanced_datasets()
model = Sequential()
model.add(Conv1D(10, 5, activation='relu', input_shape=(timesteps, 1)))
model.add(Conv1D(10, 5, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.5, seed=789))
model.add(Conv1D(5, 6, activation='relu'))
model.add(Conv1D(5, 6, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.5, seed=789))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5, seed=789))
model.add(Dense(2, activation='softmax'))
model.compile(loss='logcosh', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=20, shuffle=False, verbose=1)
y_pred = model.predict(x_test)
The Sequential model is not very flexible. You should look into the functional API.
I would try something like this:
from keras.layers import (Conv1D, MaxPool1D, Dropout, Flatten, Dense,
                          Input, concatenate)
from keras.models import Model, Sequential
timesteps = 50
n = 5
def network():
    sequence = Input(shape=(timesteps, 1), name='Sequence')
    features = Input(shape=(n,), name='Features')
    conv = Sequential()
    conv.add(Conv1D(10, 5, activation='relu', input_shape=(timesteps, 1)))
    conv.add(Conv1D(10, 5, activation='relu'))
    conv.add(MaxPool1D(2))
    conv.add(Dropout(0.5, seed=789))
    conv.add(Conv1D(5, 6, activation='relu'))
    conv.add(Conv1D(5, 6, activation='relu'))
    conv.add(MaxPool1D(2))
    conv.add(Dropout(0.5, seed=789))
    conv.add(Flatten())
    part1 = conv(sequence)
    merged = concatenate([part1, features])
    final = Dense(512, activation='relu')(merged)
    final = Dropout(0.5, seed=789)(final)
    final = Dense(2, activation='softmax')(final)
    model = Model(inputs=[sequence, features], outputs=[final])
    model.compile(loss='logcosh', optimizer='adam', metrics=['accuracy'])
    return model
m = network()
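Training and prediction then take both inputs as a list, e.g. (a sketch; the array names follow the question):
m.fit([x_train, feats_train], y_train, batch_size=batch_size, epochs=20, shuffle=False, verbose=1)
y_pred = m.predict([x_test, feats_test])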

Error when checking target: expected dense_18 to have shape (1,) but got array with shape (10,)

Y_train = to_categorical(Y_train, num_classes=10)
random_seed = 2
X_train,X_val,Y_train,Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed)
Y_train.shape
model = Sequential()
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy',metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size = 86, epochs = 3,validation_data = (X_val, Y_val), verbose =2)
I have to classify the MNIST data into 10 classes. I am converting Y_train into a one-hot encoded array. I have gone through a number of answers but none has helped. Kindly guide me in this regard as I am a novice in ML and neural networks.
It seems there is no need to use model.add(Flatten()) as your first layer. Instead, you can use a dense layer with a specific input size, like model.add(Dense(64, input_shape=your_input_shape, activation="relu")).
To confirm that this issue is caused by the layers, you can check whether the to_categorical() function works on its own in a Jupyter notebook.
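For example, a quick standalone sanity check (a sketch):
from keras.utils import to_categorical
import numpy as np

labels = np.array([0, 1, 9])
print(to_categorical(labels, num_classes=10))        # one 10-wide row per label
print(to_categorical(labels, num_classes=10).shape)  # (3, 10)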
Updated Answer
Before building the model, you should reshape your data, in this case from 28x28 to 784.
train_images = train_images.reshape((-1, 784))
test_images = test_images.reshape((-1, 784))
I also suggest normalizing the data, which can be done by simply dividing the images by 255.
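For example (a sketch, assuming train_images and test_images hold the raw pixel values):
train_images = train_images.astype('float32') / 255
test_images = test_images.astype('float32') / 255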
After that step you should create your model.
model = Sequential([
    Dense(64, activation='relu', input_shape=(784,)),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax'),
])
Notice the input_shape=(784,); that is the shape of your flattened input.
Last step, compiling and fitting.
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
model.fit(
    train_images,
    train_labels,
    epochs=10,
    batch_size=16,
)
What you did was flatten the input layer without giving the network an input shape. That is why you run into this issue. The point is that you should manually reshape your inputs and feed them to the Dense() layers with the input_shape parameter.

CNN with multiple conv3d in keras

I am trying to create a CNN model in Keras with multiple Conv3D layers to work on the cifar10 dataset, but I am facing the following issue:
ValueError: ('The specified size contains a dimension with value <= 0', (-8000, 256))
Below is my code that I am trying to execute.
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv3D, MaxPooling3D
from keras.optimizers import SGD
import os
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 20
learning_rate = 0.01
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
img_rows = x_train.shape[1]
img_cols = x_train.shape[2]
colors = x_train.shape[3]
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, colors, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, colors, img_rows, img_cols)
    input_shape = (1, colors, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, colors, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, colors, 1)
    input_shape = (img_rows, img_cols, colors, 1)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3),activation='relu',input_shape=input_shape))
model.add(Conv3D(32, kernel_size=(3, 3, 3),activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Conv3D(64, kernel_size=(3, 3, 3),activation='relu'))
model.add(Conv3D(64, kernel_size=(3, 3, 3),activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
sgd=SGD(lr=learning_rate)
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=sgd,
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
I have tried with a single Conv3D and it worked, but the accuracy was very low. Code snippet below:
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3),activation='relu',input_shape=input_shape))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
Problem
The problem is with the color channel: it equals 3 initially and you're applying the convolution with size 3 and padding='valid'. After the very first Conv3D, the output tensor is:
(None, 30, 30, 1, 32)
... and no more convolutions can be applied to that dimension. The trivial example that you provide is working simply because there's just one convolutional layer.
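You can check this yourself by probing just the first layer (a sketch, assuming channels_last and the question's CIFAR-10 input shape):
from keras.models import Sequential
from keras.layers import Conv3D

probe = Sequential()
probe.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                 input_shape=(32, 32, 3, 1)))
probe.summary()  # output shape is (None, 30, 30, 1, 32): the color axis is already down to 1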
Solution
One option for you is to set padding='same', so that the tensor shape is preserved:
(None, 32, 32, 3, 32)
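For instance, a sketch of only the first layers with padding='same' (model_same is just an illustrative name; the rest of the stack would stay as in the question):
model_same = Sequential()
model_same.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same',
                      activation='relu', input_shape=input_shape))
model_same.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same', activation='relu'))
# the color axis keeps its size of 3, so the deeper Conv3D layers stay valid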
However, to me the convolution over colors doesn't add a lot of value, so I'd go with this model:
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 1), activation='relu', input_shape=input_shape))
model.add(Conv3D(32, kernel_size=(3, 3, 1), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Conv3D(64, kernel_size=(3, 3, 1), activation='relu'))
model.add(Conv3D(64, kernel_size=(3, 3, 1), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
In practice, the convolution layers preserve the spatial dimensions and the pooling layers downsample them.
The problem is that you are losing dimensionality here, so you can either set padding='same' or use 3x3x1 filters (a single channel) instead of 3x3x3.

Tensorflow JS : Convert Saved Model from tensorflow

I am trying to convert a tensorflow saved model to tensorflowjs format using this converter.
But this gives me the error IOError: SavedModel file does not exist at:
Though my directory has the Saved Model. It has:
.data-****-of-****,
.meta and .index files.
Am I missing anything?
I had an issue when I tried tensorflowjs_converter as well; I would rather use the tfjs.converters.save_keras_model method in your Python implementation, as follows.
Add just one line and that's it; then you can import this model from your tensorflowjs project, like I did.
Example code mnist_cnn.py:
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import tensorflowjs as tfjs
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
# add this line
tfjs.converters.save_keras_model(model, './SavedModel')
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])