I am trying to create a model using TensorFlow and Python; I load the data from a folder on my PC.
The Folder Structure
An Example from the data
The classes are roughly balanced: [237 items, 223 items, 495 items, 387 items, 301 items].
This is how I load my data:
labels = {'Basic T-shrit':0, 'Bikini Bottom':1, 'Cargo Pants':2, 'Jeans':3, 'Oversize T-Shirt':4}

# Data
train_datagen = ImageDataGenerator(rescale=1/256)
train_generator = train_datagen.flow_from_directory(
    'Dataset',               # this is the source directory for training images
    target_size=(256, 256),  # all images will be resized to 256 x 256
    batch_size=batch_size,
    # specify the classes explicitly
    classes=labels,
    # since we use categorical_crossentropy loss, we need categorical labels
    class_mode='categorical')
This is a model I tried:
# Model
model = Sequential()
model.add(Conv2D(32, (3, 3), strides=1, activation='relu', input_shape=(image_width, image_height, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
# 3x3 kernels (the old Keras 1 style Conv2D(32, 3, 3) would be parsed as strides=3 in Keras 2)
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(categorys_size, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
Then I start the learning process:
model.fit_generator(train_generator,
                    steps_per_epoch=epoch_steps,
                    epochs=Epoch,
                    validation_data=train_generator)
But it does not work well: the model treats the oversize T-shirt and the basic T-shirt as the same class, and classifies any kind of pants as jeans.
Model Train result
Then I tested this model:
model = tf.keras.models.Sequential([
    keras.layers.Conv2D(32, kernel_size=(5, 5), activation=tf.keras.activations.relu, input_shape=IMAGE_SHAPE),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.22),

    keras.layers.Conv2D(32, kernel_size=(5, 5), activation=tf.keras.activations.relu),
    keras.layers.AveragePooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.25),

    keras.layers.Conv2D(32, kernel_size=(4, 4), activation=tf.keras.activations.relu),
    keras.layers.AveragePooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.15),

    keras.layers.Conv2D(32, kernel_size=(3, 3), activation=tf.keras.activations.relu),
    keras.layers.AveragePooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.15),

    keras.layers.Flatten(),
    keras.layers.Dense(256, activation=tf.keras.activations.relu, kernel_regularizer=keras.regularizers.l2(0.001)),
    #keras.layers.Dropout(0.25),
    keras.layers.Dense(64, activation=tf.keras.activations.relu, kernel_regularizer=keras.regularizers.l2(0.001)),
    #keras.layers.Dropout(0.1),
    keras.layers.Dense(len(labels), activation=tf.keras.activations.softmax)])

model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
Then I start learning:
# Start learning
checkpoint_path = "/chk/cp-{epoch:04d}.ckpt"
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1,
                                                 period=10)

model.fit(train,
          epochs=Epoch,
          callbacks=[cp_callback],
          validation_data=val,
          verbose=1)
And this is how I load the data:
data = tf.keras.utils.image_dataset_from_directory('Dataset', labels='inferred', image_size=(192, 192))
data_iterator = data.as_numpy_iterator()
batch = data_iterator.next()

fig, ax = plt.subplots(ncols=4, figsize=(20, 20))
for idx, img in enumerate(batch[0][:4]):
    ax[idx].imshow(img.astype(int))
    ax[idx].title.set_text(batch[1][idx])

# Scale data
data = data.map(lambda x, y: (x/192, y))
data.as_numpy_iterator().next()

train_size = int(len(data)*.7)
val_size = int(len(data)*.2)
test_size = int(len(data)*.1)
print(train_size)

train = data.take(train_size)
val = data.skip(train_size).take(val_size)
test = data.skip(train_size+val_size).take(test_size)
What am I doing wrong? Is the data I collected good enough for what I am trying to do? Am I missing something?
Thanks
I'm trying to get the 'logits' out of my Keras CNN classifier.
I have tried the suggested method here: link.
First, I created two models to check the implementation:
create_CNN_MNIST: a CNN classifier that returns the softmax probabilities.
create_CNN_MNIST_logits: a CNN with the same layers as the first model, with a small twist in the last layer - the activation function is changed to linear in order to return logits.
Both models were fed the same MNIST train and test data. Then I applied softmax to the logits, and I got a different output than from the softmax CNN.
I couldn't find a problem in my code. Maybe you could advise another method to extract the logits from the model?
the code:
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)
def create_CNN_MNIST_logits():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='linear'))
    # compile model
    opt = SGD(learning_rate=0.01, momentum=0.9)

    def my_sparse_categorical_crossentropy(y_true, y_pred):
        # despite the name, this wraps the (non-sparse) categorical crossentropy,
        # which matches the one-hot labels used below
        return keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)

    model.compile(optimizer=opt, loss=my_sparse_categorical_crossentropy, metrics=['accuracy'])
    return model
def create_CNN_MNIST():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='softmax'))
    # compile model
    opt = SGD(learning_rate=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# load data
X_train = np.load('./data/X_train.npy')
X_test = np.load('./data/X_test.npy')
y_train = np.load('./data/y_train.npy')
y_test = np.load('./data/y_test.npy')
#create models
model_softmax = create_CNN_MNIST()
model_logits = create_CNN_MNIST_logits()
pixels = 28
channels = 1
num_labels = 10
# Reshaping to format which CNN expects (batch, height, width, channels)
trainX_cnn = X_train.reshape(X_train.shape[0], pixels, pixels, channels).astype('float32')
testX_cnn = X_test.reshape(X_test.shape[0], pixels, pixels, channels).astype('float32')
# Normalize images from 0-255 to 0-1
trainX_cnn /= 255
testX_cnn /= 255
train_y_cnn = utils.to_categorical(y_train, num_labels)
test_y_cnn = utils.to_categorical(y_test, num_labels)
# train the models
model_logits.fit(trainX_cnn, train_y_cnn, validation_split=0.2, epochs=10,
                 batch_size=32)
model_softmax.fit(trainX_cnn, train_y_cnn, validation_split=0.2, epochs=10,
                  batch_size=32)
At the evaluation stage, I apply softmax to the logits to check whether the result is the same as the regular model's:
#predict
y_pred_softmax = model_softmax.predict(testX_cnn)
y_pred_logits = model_logits.predict(testX_cnn)
# apply softmax to the logits to get the same result as the regular CNN
y_pred_logits_activated = softmax(y_pred_logits)
Now I get different values in y_pred_logits_activated and y_pred_softmax, which leads to different accuracies on the test set.
Your models are probably being trained differently: make sure to set the seed before both fit calls so that they are initialised with the same weights and get the same train/val split. Also, your softmax might be incorrect:
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x)
    return e_x / e_x.sum(axis=1, keepdims=True)
Subtracting the max does not change the result mathematically (https://stackoverflow.com/a/34969389/10475762); it only improves numerical stability. The important part is that the axis should be 1 (with keepdims=True for correct broadcasting) if your matrix is of shape [batch, outputs].
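A minimal sketch of the seeding idea (assuming TF 2.x; the models are rebuilt after seeding, so their initial weights and train/val splits match):
import numpy as np
import tensorflow as tf

# seed everything, then build and fit each model from the same starting point
np.random.seed(0)
tf.random.set_seed(0)
model_logits = create_CNN_MNIST_logits()
model_logits.fit(trainX_cnn, train_y_cnn, validation_split=0.2, epochs=10, batch_size=32)

np.random.seed(0)
tf.random.set_seed(0)
model_softmax = create_CNN_MNIST()
model_softmax.fit(trainX_cnn, train_y_cnn, validation_split=0.2, epochs=10, batch_size=32)
Alternatively, to extract logits without training twice at all, you can copy the trained softmax model's weights into the linear-output twin, since the two architectures differ only in the final activation (which has no weights of its own):
logits_model = create_CNN_MNIST_logits()
logits_model.set_weights(model_softmax.get_weights())  # identical layers, so weights transfer 1:1
logits = logits_model.predict(testX_cnn)
# softmax over these logits should now match model_softmax.predict(testX_cnn)
# up to floating-point error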
I was studying different CNN architectures to predict the CIFAR10 dataset, and I found this interesting GitHub repository:
https://gist.github.com/wielandbrendel/ccf1ff6f8f92139439be
I tried to run the model, but it was created 6 years ago and the following Keras command is no longer valid:
model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
How is this command translated into the modern Keras syntax for Conv2D?
I get an error in Keras when I try to pass this sequence of integers to Convolution2D(32, 3, 3, 3, ...).
I guess 32 is the number of channels, and then we specify a 3x3 kernel size, but I am not sure about the meaning of the remaining 3 (the one in 4th position).
PS: changing border_mode to padding='valid' or 'same' returns the following error:
model.add(Convolution2D(32, 3, 3, 3, padding='valid'))
TypeError: __init__() got multiple values for argument 'padding'
The gist you're following is outdated and also has some issues; you don't need to follow it now. Here is an updated version of it. Try this.
Imports and Dataset
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Dropout, Activation,
                                     Flatten, Conv2D, MaxPooling2D)
from tensorflow.keras.optimizers import SGD

# parameters
batch_size = 32
nb_classes = 10
nb_epoch = 5

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()

# convert class vectors to binary class matrices
Y_train = tf.keras.utils.to_categorical(y_train, nb_classes)
Y_test = tf.keras.utils.to_categorical(y_test, nb_classes)

# scale pixel values from 0-255 to 0-1
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255

X_train.shape, y_train.shape, X_test.shape, y_test.shape
((50000, 32, 32, 3), (50000, 1), (10000, 32, 32, 3), (10000, 1))
Modeling
model = Sequential()
# each Conv2D already applies ReLU via activation='relu'
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding='same'))
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding='same'))
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original)
sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
Compile and Run
model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch)

# test score & top-1 performance
score = model.evaluate(X_test, Y_test, batch_size=batch_size)
y_hat = model.predict(X_test)
yhat = np.argmax(y_hat, 1)
top1 = np.mean(yhat == np.squeeze(y_test))
print('Test score/Top1', score, top1)
Convolution2D is now named Conv2D, but there is still an alias for Convolution2D, so that's not the problem.
The border_mode argument is not available anymore; the equivalent is padding, with the options 'valid' or 'same' (the old 'full' mode has no direct equivalent in Keras 2).
Try both to see which one fits the output shapes and lets the code work.
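For illustration, here is a sketch of how the old call maps onto the modern API. The old pre-1.0 signature was, as far as I recall, Convolution2D(nb_filter, stack_size, nb_row, nb_col), so the second 3 is the number of input channels; modern Keras infers that from the input shape, so the argument is simply dropped:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D

model = Sequential()
# old: model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
# new: 32 filters and a 3x3 kernel; the 3 input channels are inferred from
# input_shape. 'same' is the closest built-in padding to the old 'full' mode
# (a true 'full' convolution can be emulated with ZeroPadding2D(2) + padding='valid').
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same',
                 input_shape=(32, 32, 3)))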
I'm trying to classify a video by treating it as an image-classification problem, using the individual frames as the inputs, but I have no idea how to code it.
I'm using Inception-ResNet as my CNN, but I don't know anything about RNNs or how to use them.
This is ML_machine; here is what I wanted to show you.
This is an implementation of a CNN to classify the MNIST data; it is not mine and comes from here:
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
To convert this CNN followed by a fully connected layer into a CNN followed by an RNN, simply change the line
model.add(Dense(num_classes, activation='softmax'))
into
model.add(SimpleRNN(num_classes, activation='softmax'))
(of course, you need to import it; see just below)
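For reference, the import for that layer is:
from keras.layers import SimpleRNN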
You might also have to change the input dimensions of your network and/or wrap the whole CNN part in TimeDistributed; I had trouble with this in some versions of TensorFlow and not in others. A sketch of the TimeDistributed route follows.
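As a hedged sketch only (the clip length of 16, the 64x64x3 frame size, and num_classes are made-up values for illustration), applying the same CNN to each frame and letting a recurrent layer aggregate over time looks roughly like this:
from keras.models import Sequential
from keras.layers import (TimeDistributed, Conv2D, MaxPooling2D,
                          Flatten, LSTM, Dense)

model = Sequential()
# apply the same Conv2D/pooling stack to every frame in the clip
model.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu'),
                          input_shape=(16, 64, 64, 3)))
model.add(TimeDistributed(MaxPooling2D((2, 2))))
model.add(TimeDistributed(Flatten()))
# the recurrent layer then aggregates the per-frame features over time
model.add(LSTM(128))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])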
EDIT:
I ran into some problems myself with the code I gave you; ending a CNN with a recurrent layer is harder than I thought because of the dimensions. Here is how I managed to do it:
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=in_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
# NO MORE POOLING
model.add(Dropout(0.25))
# reshape with the first argument being the number of filters in your last conv layer
model.add(Reshape((64, -1)))
# swap the axes so the RNN sees height*width time steps of 64 features each
model.add(Permute((2, 1)))
# it can also be an LSTM
model.add(SimpleRNN(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
EDIT 2: a dummy example of a simple fully connected NN in Keras:
import numpy as np

trng_input = np.random.uniform(size=(1000, 4))
trng_output = np.column_stack([np.sin(trng_input).sum(axis=1), np.cos(trng_input).sum(axis=1)])

model = Sequential()
model.add(Dense(6, input_shape=(4,), activation='relu'))  # per-sample shape, without the batch dimension
model.add(Dense(2, activation='sigmoid'))
model.compile(loss='mse', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
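A quick smoke test that the dummy network compiles and trains (a sketch; the loss values themselves are irrelevant here):
model.fit(trng_input, trng_output, epochs=5, batch_size=32, verbose=0)
print(model.predict(trng_input[:3]))  # two outputs per sample, as expected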
I am trying to create a CNN model in Keras with multiple Conv3D layers to work on the CIFAR10 dataset, but I am facing the following issue:
ValueError: ('The specified size contains a dimension with value <= 0', (-8000, 256))
Below is the code that I am trying to execute.
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv3D, MaxPooling3D
from keras.optimizers import SGD
import os
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 20
learning_rate = 0.01
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
img_rows = x_train.shape[1]
img_cols = x_train.shape[2]
colors = x_train.shape[3]
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, colors, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, colors, img_rows, img_cols)
    input_shape = (1, colors, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, colors, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, colors, 1)
    input_shape = (img_rows, img_cols, colors, 1)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu', input_shape=input_shape))
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Conv3D(64, kernel_size=(3, 3, 3), activation='relu'))
model.add(Conv3D(64, kernel_size=(3, 3, 3), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
sgd = SGD(learning_rate=learning_rate)
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=sgd,
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
I have tried with a single Conv3D and it worked, but the accuracy was very low. Code snippet below:
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
Problem
The problem is with the color channel: it equals 3 initially and you're applying the convolution with size 3 and padding='valid'. After the very first Conv3D, the output tensor is:
(None, 30, 30, 1, 32)
... and no more convolutions can be applied to that dimension. The trivial example that you provide is working simply because there's just one convolutional layer.
Solution
One option for you is to set padding='same', so that the tensor shape is preserved:
(None, 32, 32, 3, 32)
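For illustration, a sketch of that option applied to the first convolutional block of the model in the question (only the Conv3D calls change):
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                 padding='same', input_shape=input_shape))
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu', padding='same'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
# ... and likewise for the remaining convolutional layers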
However, to me the convolution over colors doesn't add a lot of value, so I'd go with this model:
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 1), activation='relu', input_shape=input_shape))
model.add(Conv3D(32, kernel_size=(3, 3, 1), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Conv3D(64, kernel_size=(3, 3, 1), activation='relu'))
model.add(Conv3D(64, kernel_size=(3, 3, 1), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 1)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
In practice, the convolutional layers should preserve the dimensions and the pooling layers should do the downsampling. The problem is that you are losing dimensionality here: with padding='valid', each 3x3x3 convolution shrinks the color axis, which starts at only 3. So you can either set padding='same' or use 3x3x1 kernels (a single channel in the depth direction) instead of 3x3x3.
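To see exactly where the shape collapses, here is a small probing sketch (the input shape follows the channels-last reshape from the question):
from keras.models import Sequential
from keras.layers import Conv3D

probe = Sequential()
probe.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                 input_shape=(32, 32, 3, 1)))
# the color axis has already shrunk from 3 to 1, so a second
# valid 3x3x3 convolution would need a negative dimension
print(probe.output_shape)  # (None, 30, 30, 1, 32)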