Training CNN model and accuracy stays at 1 - TensorFlow

I've been trying to train this CNN model. It's from a TensorFlow tutorial and I just changed the dataset (I used the Fruits 360 dataset) without altering the core of the code. When it finishes training, the accuracy stays constant at 0.8565 and doesn't change, and when I try to test some images it is almost always wrong.
What am I doing wrong?
Here's the code I used:
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
import tarfile
from tensorflow import keras
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.models import Sequential
import pathlib
dataset_url = "https://file.io/z5JM3sYAWXv4"
data_dir = tf.keras.utils.get_file(origin=dataset_url,
                                   fname='tomatos',
                                   untar=True,
                                   extract=True)
data_dir = pathlib.Path(data_dir)
print(data_dir)
file_count = sum(len(files) for _, _, files in os.walk(r'tomatos'))
print(file_count)
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
num_classes = len(class_names)
model = Sequential([
    layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
epochs = 2
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)
data_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal",
                          input_shape=(img_height, img_width, 3)),
        layers.RandomRotation(0.1),
        layers.RandomZoom(0.1),
    ]
)
model = Sequential([
    data_augmentation,
    layers.Rescaling(1./255),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
epochs = 4
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)
sunflower_url = "https://puffycarrot.com/wp-content/uploads/2017/04/Green-tomatoes.jpg"
sunflower_path = tf.keras.utils.get_file('tomato2', origin=sunflower_url)
img = tf.keras.utils.load_img(
    sunflower_path, target_size=(img_height, img_width)
)
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)

@Yaman Tarawneh, I tried replicating your above-mentioned code in Google Colab (using TF 2.8) and in PyCharm (using TF 2.7) and did not find the error. I got the same output in both PyCharm and Google Colab:
Total params: 3,988,898
Trainable params: 3,988,898
Non-trainable params: 0
_________________________________________________________________
Epoch 1/4
78/78 [==============================] - 8s 41ms/step - loss: 0.0309 - accuracy: 0.9835 - val_loss: 5.6374e-07 - val_accuracy: 1.0000
Epoch 2/4
78/78 [==============================] - 2s 25ms/step - loss: 5.7533e-07 - accuracy: 1.0000 - val_loss: 2.7360e-07 - val_accuracy: 1.0000
Epoch 3/4
78/78 [==============================] - 2s 25ms/step - loss: 3.0400e-07 - accuracy: 1.0000 - val_loss: 1.3978e-07 - val_accuracy: 1.0000
Epoch 4/4
78/78 [==============================] - 2s 25ms/step - loss: 1.7403e-07 - accuracy: 1.0000 - val_loss: 7.2102e-08 - val_accuracy: 1.0000
This image most likely belongs to Tomato not Ripened with a 100.00 percent confidence.
For further analysis, if the issue still persists, please let us know which Python and TensorFlow versions you are using.
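For reference, you can print both versions with the following minimal snippet (nothing project-specific assumed):
import sys
import tensorflow as tf

print("Python:", sys.version)
print("TensorFlow:", tf.__version__)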

Related

The second epoch's initial loss is not consistently related to the first epoch's final loss. The loss and the accuracy remain constant every epoch

The initial loss of the second epoch is not consistently related to the final loss of the first epoch. After that, the initial loss stays the same every epoch, and all of these parameters stay the same. I have some background in deep learning, but this is my first time implementing my own model, so I want to understand intuitively what's going wrong with it. The dataset consists of cropped faces in two classes, each with 300 pictures. I highly appreciate your help.
import tensorflow as tf
from tensorflow import keras
from IPython.display import Image
import matplotlib.pyplot as plt
from keras.layers import ActivityRegularization
from keras.layers import Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_generator = ImageDataGenerator(
    featurewise_center=False, samplewise_center=False,
    featurewise_std_normalization=False, samplewise_std_normalization=False,
    rotation_range=0, width_shift_range=0.0, height_shift_range=0.0,
    brightness_range=None, shear_range=0.0, zoom_range=0.0, channel_shift_range=0.0,
    horizontal_flip=False, vertical_flip=False, rescale=1./255
)
image = image_generator.flow_from_directory('./util/untitled folder', batch_size=938)
x, y = image.next()
x_train = x[:500]
y_train = y[:500]
x_test = x[500:600]
y_test = y[500:600]
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(4)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(4)
plt.imshow(x_train[0])
def convolutional_model(input_shape):
    input_img = tf.keras.Input(shape=input_shape)
    x = tf.keras.layers.Conv2D(64, (7, 7), padding='same')(input_img)
    x = tf.keras.layers.BatchNormalization(axis=3)(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=1, padding='same')(x)
    x = Dropout(0.5)(x)
    x = tf.keras.layers.Conv2D(128, (3, 3), padding='same', strides=1)(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=4)(x)
    x = tf.keras.layers.Flatten()(x)
    x = ActivityRegularization(0.1, 0.2)(x)
    outputs = tf.keras.layers.Dense(2, activation='softmax')(x)
    model = tf.keras.Model(inputs=input_img, outputs=outputs)
    return model
conv_model = convolutional_model((256, 256, 3))
conv_model.compile(loss=keras.losses.categorical_crossentropy,
                   optimizer=keras.optimizers.SGD(lr=1),
                   metrics=['accuracy'])
conv_model.summary()
conv_model.fit(train_dataset,epochs=100, validation_data=test_dataset)
Epoch 1/100
2021-12-23 15:06:22.165763: W tensorflow/core/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz
2021-12-23 15:06:22.172255: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
125/125 [==============================] - ETA: 0s - loss: 804.6805 - accuracy: 0.4860
2021-12-23 15:06:50.936870: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
125/125 [==============================] - 35s 275ms/step - loss: 804.6805 - accuracy: 0.4860 - val_loss: 0.7197 - val_accuracy: 0.4980
Epoch 2/100
125/125 [==============================] - 34s 270ms/step - loss: 0.7360 - accuracy: 0.4820 - val_loss: 0.7197 - val_accuracy: 0.4980
Epoch 3/100
125/125 [==============================] - 34s 276ms/step - loss: 0.7360 - accuracy: 0.4820 - val_loss: 0.7197 - val_accuracy: 0.4980
As you have a constant loss and accuracy, it is highly likely that your network is not learning anything (since you have two classes, it always predicts one of them).
The activation function, loss function and number of neurons on the last layer are correct.
The problem is not related to the way you load the images, but to the learning rate, which is 1. At such a high learning rate it is impossible for the network to learn anything.
You should start with a much smaller learning rate, for example 0.0001 or 0.00001, and then try to debug the data loading if you still see poor performance.
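For example, a minimal sketch of the same compile call with a smaller learning rate (1e-4 is an assumed starting point, not a tuned value):
conv_model.compile(loss=keras.losses.categorical_crossentropy,
                   optimizer=keras.optimizers.SGD(learning_rate=1e-4),  # was lr=1
                   metrics=['accuracy'])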
I am quite certain that it has something to do with how you load the data, and more specifically the x, y = image.next() part. If you can split the data from ./util/untitled folder into separate folders holding the training and validation data respectively, you can use the same kind of pipeline as in the examples section of the TensorFlow docs:
train_datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    rotation_range=0,
    width_shift_range=0.0,
    height_shift_range=0.0,
    brightness_range=None,
    shear_range=0.0,
    zoom_range=0.0,
    channel_shift_range=0.0,
    horizontal_flip=False,
    vertical_flip=False,
    rescale=1./255)
test_datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    rotation_range=0,
    width_shift_range=0.0,
    height_shift_range=0.0,
    brightness_range=None,
    shear_range=0.0,
    zoom_range=0.0,
    channel_shift_range=0.0,
    horizontal_flip=False,
    vertical_flip=False,
    rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    'data/train',
    target_size=(256, 256),
    batch_size=4)
validation_generator = test_datagen.flow_from_directory(
    'data/validation',
    target_size=(256, 256),
    batch_size=4)
model.fit(
    train_generator,
    epochs=100,
    validation_data=validation_generator)
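Note that flow_from_directory infers the class labels from the subdirectory names, so the train/validation folders only need the same per-class structure as your original data.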

Image classification CNN model always predicts the same value

I have an image dataset with the following structure:
money_photo/
    100/
    50/
    10/
    1/
Each directory contains 240 photos of the corresponding banknote value (100, 50, 10 and 1).
I'm splitting the train and validation datasets with keras.preprocessing.image_dataset_from_directory, as in the following lines:
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
Found 960 files belonging to 4 classes.
Using 768 files for training.
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
Found 960 files belonging to 4 classes.
Using 192 files for validation.
Every image is scaled to 180x180 pixels and normalised (0..255 pixel values are mapped to values between 0 and 1).
Model is defined below:
num_classes = 4
model = tf.keras.Sequential([
    layers.experimental.preprocessing.Rescaling(1./255),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes),
    layers.Activation('softmax')
])
After training I have the following results:
Epoch 3/3
24/24 [==============================] - 10s 425ms/step - loss: 0.3214 - accuracy: 0.8866 - val_loss: 0.2449 - val_accuracy: 0.9115
The way that I'm using the model to predict:
import tensorflow as tf
from PIL import Image
import numpy as np
from skimage import transform
def load(filename):
    np_image = Image.open(filename)
    np_image = np.array(np_image).astype('float32')/255
    np_image = transform.resize(np_image, (180, 180, 3))
    np_image = np.expand_dims(np_image, axis=0)
    return np_image
image = load('abd.jpg')
prediction = model.predict(image)
print(class_names[np.argmax(prediction)])
Why am I always getting the same predicted value?
Your model has a rescaling layer built in, so you should not rescale your input image. Just change
np_image = np.array(np_image).astype('float32')/255
to
np_image = np.array(np_image).astype('float32')
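With that one change, the loader becomes (a minimal sketch; the rest of the function is unchanged from the question):
def load(filename):
    np_image = Image.open(filename)
    np_image = np.array(np_image).astype('float32')  # no /255: the model's Rescaling layer normalises
    np_image = transform.resize(np_image, (180, 180, 3))
    np_image = np.expand_dims(np_image, axis=0)
    return np_image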

AttributeError: module 'tensorflow_core.compat.v2' has no attribute '__internal__'

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import matplotlib.pyplot as plt
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
im = plt.imshow(x_train[0], cmap="gray")
plt.show()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train/255
x_test = x_test/255
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=SGD(), loss='categorical_crossentropy', metics=['accuracy'])
model.fit(x_train, y_train, batch_size=64, epochs=5, validation_data=(x_test, y_test))
I tried several different version combinations, but it still reported an error:
AttributeError: module 'tensorflow_core.compat.v2' has no attribute '__internal__'
Generally you get the above error due to an incompatibility between TensorFlow and Keras. You can import keras without any issues by upgrading to the latest version. For more details you can refer to this solution.
Coming to your code, it has a couple of issues that can be resolved:
1. to_categorical is now packaged in np_utils. You need to add the import as shown below:
from keras.utils.np_utils import to_categorical
2. Typo: replace metics with metrics in model.compile.
Working code is shown below:
import keras
print(keras.__version__)
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
im = plt.imshow(x_train[0], cmap="gray")
plt.show()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train/255
x_test = x_test/255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=64, epochs=5, validation_data=(x_test, y_test))
Output:
2.5.0
(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 512) 401920
_________________________________________________________________
dense_1 (Dense) (None, 256) 131328
_________________________________________________________________
dense_2 (Dense) (None, 10) 2570
=================================================================
Total params: 535,818
Trainable params: 535,818
Non-trainable params: 0
_________________________________________________________________
Epoch 1/5
938/938 [==============================] - 21s 8ms/step - loss: 1.2351 - accuracy: 0.6966 - val_loss: 0.3644 - val_accuracy: 0.9011
Epoch 2/5
938/938 [==============================] - 7s 7ms/step - loss: 0.3554 - accuracy: 0.9023 - val_loss: 0.2943 - val_accuracy: 0.9166
Epoch 3/5
938/938 [==============================] - 7s 7ms/step - loss: 0.2929 - accuracy: 0.9176 - val_loss: 0.2553 - val_accuracy: 0.9282
Epoch 4/5
938/938 [==============================] - 7s 7ms/step - loss: 0.2538 - accuracy: 0.9281 - val_loss: 0.2309 - val_accuracy: 0.9337
Epoch 5/5
938/938 [==============================] - 7s 8ms/step - loss: 0.2313 - accuracy: 0.9355 - val_loss: 0.2096 - val_accuracy: 0.9401
<keras.callbacks.History at 0x7f615c82d090>
You can refer to this gist for the above use case in the TensorFlow version.
Error:
AttributeError: module 'tensorflow_core.compat.v2' has no attribute '__internal__'
Solution:
Install compatible libraries:
!pip install tensorflow==2.1
!pip install keras==2.3.1
Then import:
from tensorflow.keras.models import load_model

Accuracy of Auto Encoder (Image Reconstruction) model in Keras

I am trying to make a simple autoencoder model for image reconstruction with the MNIST dataset.
When I run this model, it reports an accuracy:
60000/60000 [==============================] - 5s 83us/sample - loss: 0.0373 - accuracy: 0.2034 - val_loss: 0.0368 - val_accuracy: 0.217
but I am not sure how it is calculated, given that the predictions themselves are images.
I dug all the way down to the function sparse_categorical_accuracy, but could not reach any conclusion about the formula for accuracy (see the note after the code below).
latent_dim = 64

class Autoencoder(Model):
    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = tf.keras.Sequential([
            layers.Flatten(),
            layers.Dense(latent_dim, activation='relu'),
        ])
        self.decoder = tf.keras.Sequential([
            layers.Dense(784, activation='sigmoid'),
            layers.Reshape((28, 28))
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Autoencoder(latent_dim)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError(),
                    metrics=['accuracy'])
autoencoder.fit(x_train, x_train,
                epochs=10,
                shuffle=True,
                validation_data=(x_test, x_test))
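Since the 'accuracy' string is ambiguous for a reconstruction loss, a less ambiguous option is to pass an explicit metric object instead. A minimal sketch using a standard Keras metric (choice of MeanAbsoluteError is illustrative, not from the original model):
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError(),
                    metrics=[tf.keras.metrics.MeanAbsoluteError()])  # unambiguous for image outputs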

Regressing on an image to predict a scalar

Given 256x256 RGB input images, I'm trying to regress to predict a point on the X axis of the image (0-48000).
Initially, I tried [mobile_net -> GlobalAveragePooling2D -> several Dense layers]. I didn't realize pooling was discarding the spatial information.
Last night I trained a simpler net; the loss decreased all night, but it's predicting negative values.
How can I modify this architecture to predict a 0-48000 scalar?
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu', input_shape=(256, 256, 3)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Conv2D(32, kernel_size=3, activation='relu'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1, kernel_initializer='normal'),
])
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae', 'mape'])
EDIT:
When inferring with my network, I'm getting vastly different outputs on each run for the SAME file. How is that possible?
Infer outputs, running multiple times on the same file:
-312864.9444580078
762.7029418945312
193352.7603149414
Here is the inference fn:
def infer(checkpoint_path):
    png_file = ['3023_28338_26_m.png', '3023_28338_26_m.png'][1]
    test_file = data_root + png_file
    onset = png_file.strip('_m.png.').split('_')[1]
    img = load_and_preprocess_from_path_label(test_file, 0)
    tst = np.expand_dims(img[0], axis=0)
    model = load_model_and_checkpoint(checkpoint_path)
    val = model.predict(tst)[0][0] * 48000
Here is the final epoch of training.
2019-05-26 11:11:56.698907: I tensorflow/core/kernels/data/shuffle_dataset_op.cc:150] Shuffle buffer filled.
94/95 [============================>.] - ETA: 0s - loss: 0.0063 - mse: 0.0063 - mae: 0.0627 - mape: 93.2817
Epoch 00100: saving model to /media/caseybasichis/sp_data/sp_data/datasets/one_sec_onset_01/model7.ckpt
95/95 [==============================] - 47s 500ms/step - loss: 0.0063 - mse: 0.0063 - mae: 0.0626 - mape: 93.2076
Here is the latest network.
mobile_net = tf.keras.applications.ResNet50(input_shape=(256, 256, 3), include_top=False, weights='imagenet')
mobile_net.trainable = False
model = tf.keras.Sequential([
    mobile_net,
    tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, kernel_initializer='normal', activation='relu'),
    tf.keras.layers.BatchNormalization(axis=chanDim),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, kernel_initializer='normal', activation='linear'),  # activation='sigmoid'
])
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae', 'mape'])  # mean_squared_logarithmic_error
You can simply use a sigmoid activation on the last layer and multiply the output by the scale (in a Lambda layer, or preferably just scale the output outside the network):
model.add(Activation('sigmoid'))
model.add(Lambda(lambda x: 48000*x))
or
model.add(Activation('sigmoid'))
...
model.fit(x_train, y_train/48000.0)
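Put together in the Sequential style of the network above, a minimal sketch of the first option (the 48000 scale comes from the question; the layer sizes are illustrative):
model = tf.keras.Sequential([
    mobile_net,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1),
    tf.keras.layers.Activation('sigmoid'),          # bounds the output to (0, 1)
    tf.keras.layers.Lambda(lambda x: 48000.0 * x),  # rescales to (0, 48000)
])
model.compile(loss='mse', optimizer='adam', metrics=['mae'])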