ValueError: Error when checking target: expected dense_2 to have shape (None, 2) but got array with shape (321, 3) - tensorflow

I want to create an image classifier using Keras and train it with a few example images. Later I will use pre-trained models and add a few layers at the end, but first I want to understand Keras and CNNs.
My console prints the following error:
ValueError: Error when checking target: expected dense_2 to have shape
(None, 2) but got array with shape (321, 3)
Here is my code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import numpy as np
import cv2
import time
from PIL import Image
import keras
import glob
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
labels = ['buena', 'mala', 'otro']
def to_one_hot(labels, ys):
    result = np.zeros((len(ys), len(labels)))
    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            result[i, j] = int(ys[i] == labels[j])
    return result
def build_dataset(labels):
    num_classes = len(labels)
    x = []
    y = []
    for label in labels:
        for filename in glob.glob('./tf_files/papas_fotos/' + label + '/*.jpg'):
            img = cv2.imread(filename)
            img = np.resize(img, (100, 100, 3))
            x.append(img)
            y.append(label)
    y = to_one_hot(labels, y)
    # y = keras.utils.to_categorical(y, num_classes=3)
    x = np.array(x)
    x_train = x[20:]
    y_train = y[20:]
    x_test = x[:19]
    y_test = y[:19]
    print(x.shape, y.shape)
    return x_train, y_train, x_test, y_test
model = Sequential()
# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
x_train, y_train, x_test, y_test = build_dataset(labels)
model = load_model('thebestmodel.h5')
print(model)
model.fit(x_train, y_train, batch_size=32, epochs=20)
score = model.evaluate(x_test, y_test, batch_size=32)
model.save('thebestmodel.h5')
print(score)
What mistake am I making? I think it may be the size of my one-hot encoded labels, but I can't make it work.
Thanks!

Although your code was fixed for this specific error, you're loading a saved model: model = load_model('thebestmodel.h5')
This line discards the model you just built and replaces it with whatever is stored in the file, undoing everything before it.
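
A minimal sketch of one way to guard against that (an assumption, not part of the original answer): load the saved model only if the file exists, and otherwise keep the freshly built one.

import os
from keras.models import load_model

# Only resume from disk when a checkpoint is actually there; otherwise
# the model built and compiled above is used as-is.
if os.path.exists('thebestmodel.h5'):
    model = load_model('thebestmodel.h5')

Note that when the file does exist, load_model still replaces the in-memory architecture wholesale, so the saved file must contain the Dense(3) head you expect.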


Tensorflow: ValueError: Data cardinality is ambiguous:

I recently started learning TensorFlow and am following this guide:
https://pythonprogramming.net/convolutional-neural-network-deep-learning-python-tensorflow-keras/
I am attempting to use my own dataset, also with two labels (car and not car).
This is my code:
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import pickle
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)
X = X/255.0
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, batch_size=32, epochs=3, validation_split=0.3)
model.save('car.model')
However, I'm getting an error that I do not understand how to fix.
raise ValueError(msg)
ValueError: Data cardinality is ambiguous:
x sizes: 8406
y sizes: 0
Please provide data which shares the same first dimension.
Appreciate the help!
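
No fix is recorded here, but a minimal check worth trying (an assumption based on the error message, not on the original post): if y was pickled as a plain Python list, converting it to a NumPy array gives Keras an unambiguous first dimension to compare against X.

import numpy as np

y = np.array(y)
# Both first dimensions should now print as 8406; if y is still empty,
# the pickle file itself was written without the labels.
print(X.shape, y.shape)
model.fit(X, y, batch_size=32, epochs=3, validation_split=0.3)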

Can I get all the Keras layer outputs?

I just started with deep learning and I want to get the input/output of each layer in real time. I am using Google Colab with TensorFlow 2 and Python 3. I tried to get the layers like this, but for some reason that I don't understand it is not working. Any help will be appreciated.
# Here are imports
from __future__ import absolute_import, division, print_function, unicode_literals
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
from tensorflow.keras import backend as K
# I am using the CIFAR10 dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
# Here is the model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# Compilation of the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))
# Based on
# https://stackoverflow.com/questions/41711190/keras-how-to-get-the-output-of-each-layer
# I tried this
tf.compat.v1.disable_eager_execution()
inp = model.input # input placeholder
outputs = [layer.output for layer in model.layers] # all layer outputs
functors = [K.function([inp, K.learning_phase()], [out]) for out in outputs] # evaluation functions
# Testing
test = np.random.random(input_shape)[np.newaxis,...]
layer_outs = [func([test, 1.]) for func in functors]
print(layer_outs)
# The error appears at this line:
functors = [K.function([inp, K.learning_phase()], [out]) for out in outputs]
# I got this error message:
Tensor Tensor("conv2d/Identity:0", shape=(None, 30, 30, 32), dtype=float32) is not an element of this graph.
This error basically tells you that you are trying to change the graph after compiling it. When you call compile, TF statically defines all operations. You have to move the code snippet where you define the functors above the compile call. Just swap the last lines with these:
tf.compat.v1.disable_eager_execution()
inp = model.input                                    # input placeholder
outputs = [layer.output for layer in model.layers]   # all layer outputs
functors = [K.function([inp, K.learning_phase()], [out]) for out in outputs]  # evaluation functions
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=1,
                    validation_data=(test_images, test_labels))
# Testing
input_shape = [1] + list(model.input_shape[1:])
test = np.random.random(input_shape)
layer_outs = [func([test, 1.]) for func in functors]
print(layer_outs)
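
If you would rather stay in eager mode, here is a sketch of an alternative (not part of the original answer): in TF 2.x you can wrap the trained layers in a second Model whose outputs are all the intermediate activations, with no K.function or graph manipulation needed.

import numpy as np
import tensorflow as tf

# Maps the original input to every layer's output in one forward pass.
extractor = tf.keras.Model(inputs=model.input,
                           outputs=[layer.output for layer in model.layers])

test = np.random.random([1] + list(model.input_shape[1:]))
layer_outs = extractor(test)  # one tensor per layer
for layer, out in zip(model.layers, layer_outs):
    print(layer.name, out.shape)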

Sampled softmax input parameters

I am working on a speaker recognition problem. I have a very large number of classes, so I need to use tf.nn.sampled_softmax_loss to speed up training. The problem is that I am using Keras with TensorFlow as the backend, and Keras doesn't implement sampled softmax, so I need to use the TensorFlow function directly, but it's unclear what the inputs of the tf.nn.sampled_softmax_loss() function should be.
My model and input are as below:
from preprocess import *
import keras
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D,AveragePooling2D
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import load_model
from keras import regularizers
import numpy as np
from keras.callbacks import EarlyStopping
from keras import metrics
import os
import tensorflow as tf
from tensorflow.python.framework import dtypes
from importance_sampling.training import ImportanceTraining
epochs = 50
batch_size = 100
verbose = 1
labels = get_labels(big_numpy_files_path)
num_classes = len(labels)
#save_data_as_numpy_array(max_len = feature_dim_1, max_len2 = feature_dim_2,origin_path=data_set_path,destination_path=numpy_files_path)
#Get X & Y
X_train, X_valid, X_test, y_train, y_valid, y_test = get_x_y_data(split_ratio=0.8, random_state=42, maxsamples=20, path=big_numpy_files_path)
#reshape X for input into CNN
X_train, X_valid, X_test = reshape_prepare_for_input(X1=X_train, X2=X_valid, X3=X_test,channel=1)
#Dimensions
dim_1 = X_train.shape[1]
dim_2 = X_train.shape[2]
dim_3 = X_train.shape[3]
#one hot encoding of Y
y_train_hot = to_categorical(y_train)
y_valid_hot = to_categorical(y_valid)
y_test_hot = to_categorical(y_test)
#Model
model = Sequential()
model.add(Conv2D(128, kernel_size=(6, 6),strides=2, activation='relu', input_shape=(X_train.shape[1], X_train.shape[2], X_train.shape[3])))
model.add(Conv2D(64, kernel_size=(2, 2),strides=1, activation='relu'))
model.add(Conv2D(32, kernel_size=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(256, activation='relu', use_bias=True,kernel_regularizer=regularizers.l2(0.01)))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu',use_bias=True,kernel_regularizer=regularizers.l2(0.02)))
model.add(Dropout(0.3))
model.add(Dense(256, activation='relu',use_bias=True,kernel_regularizer=regularizers.l2(0.02)))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))
loss = keras.losses.categorical_crossentropy
model.compile(loss=loss,optimizer='adamax',metrics=['accuracy'])
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, verbose=verbose, mode='auto')
model.fit(X_train, y_train_hot,batch_size=batch_size,callbacks=[earlystopping],epochs=epochs, verbose=verbose, validation_data=(X_valid, y_valid_hot))
If I want to replace the loss function above with sampled softmax, and I try to do something like below, what should the inputs be given my architecture above, and should I add tf.reduce_mean?
For more details on the code, see https://github.com/selimelawwa/Speaker_Verification
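
For reference, a minimal sketch of how tf.nn.sampled_softmax_loss is typically wired up (the variable names and shapes are assumptions about this architecture, not from the original post): it replaces the final Dense softmax plus categorical_crossentropy, takes the activations of the last hidden layer as its inputs, and expects integer class indices rather than one-hot labels.

import tensorflow as tf

num_sampled = 100  # hypothetical: negative classes sampled per batch

# out_weights has shape [num_classes, 256] and out_biases [num_classes];
# hidden stands for the 256-dim output of the last Dense layer above,
# and y_idx for the labels as indices with shape [batch_size, 1].
loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(weights=out_weights,
                               biases=out_biases,
                               labels=y_idx,
                               inputs=hidden,
                               num_sampled=num_sampled,
                               num_classes=num_classes))

So yes, tf.reduce_mean is needed, since sampled_softmax_loss returns one loss value per example. At evaluation time you would still compute the full softmax; the sampling is only a training-time approximation.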

What will happen if a CNN model trained to detect potholes and garbage receives a non-pothole, non-garbage image at test time?

I am using Keras with the TensorFlow backend to detect potholes and garbage with my trained model. The output for detecting potholes or garbage works fine, but when I give it a random image of a car, bike, cat, human, or building, it identifies everything as the garbage class. What should I do now? What should the optimal output be for such cases? I am providing my code here.
#train
import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from dataset_pothole import pothole
from keras.models import model_from_json
# 4. Load pre-shuffled MNIST data into train and test sets
(X_train, y_train), (X_test, y_test) = pothole.load_data()
# 5. Preprocess input data
X_train = X_train.reshape(X_train.shape[0], 50, 50, 3)
X_test = X_test.reshape(X_test.shape[0], 50, 50, 3)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# 6. Preprocess class labels
Y_train = np_utils.to_categorical(y_train, 2)
Y_test = np_utils.to_categorical(y_test, 2)
# 7. Define model architecture
model = Sequential()
model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(50, 50, 3)))
model.add(Convolution2D(32, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
# 8. Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# 9. Fit model on training data
model.fit(X_train, Y_train,
          batch_size=32, nb_epoch=20, verbose=1)
# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
#evaluation
import numpy as np
np.random.seed(123) # for reproducibility
import keras
from keras.utils import np_utils
from keras.models import model_from_json
import os
from PIL import Image
from numpy import *
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
img = input("Please enter the test filename:")
test = array(array(Image.open(img)).flatten())
print(test.shape)
X_test = test.reshape((1, 50, 50, 3))
print(X_test.shape)
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
#loaded_model.compile(loss='categorical_crossentropy',
# optimizer='adam',
# metrics=['accuracy'])
prediction = loaded_model.predict_classes(X_test)
print(prediction)
print(loaded_model.predict(X_test))
if prediction == [1]:
    print("Pothole")
elif prediction == [0]:
    print("Garbage")
else:
    print("Invalid Image!")

About using Keras for multi-label classification

I am trying to train a multi-label classifier. I used sigmoid units in the output layer and the binary_crossentropy loss. The current problem is that the training and testing results look ideal, with great loss and accuracy values, but when I use model.predict() to predict labels, the output doesn't match the real label values. How should I change the code to solve this?
The shape of the training and testing set is (-1, 1, 300, 300), and the shape of the target labels is (-1, 478); I have 478 labels in total.
My complete code:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten, Dropout
from keras.optimizers import Adam
X = np.load('./data/X_train.npy')
y = np.load('./data/Y_train.npy')
X_train, y_train = X[:2000], y[:2000]
X_test, y_test = X[2000:], y[2000:]
model = Sequential()
# The input is (1, 300, 300), i.e. channels-first, so data_format is set
# explicitly on the convolution and pooling layers.
model.add(Convolution2D(32, (5, 5), padding='same', data_format='channels_first', input_shape=(1, 300, 300)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format='channels_first'))
model.add(Dropout(0.25))
model.add(Convolution2D(64, (5, 5), padding='same', data_format='channels_first'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same', data_format='channels_first'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(478))
model.add(Activation('sigmoid'))
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
print('Training ------------')
model.fit(X_train, y_train, epochs=5, batch_size=300, validation_data=(X_test, y_test), verbose=1)
model.save('model.h5')
Could you help me to find a solution? Thanks!
Have you tried filtering the values based on a threshold?
pred = model.predict(x_test)
pred[pred>=0.5] = 1
pred[pred<0.5] = 0
print(pred[0:5])