Regressing on an image to predict a scalar - tensorflow

Given 256x256 RGB input images, I'm trying to regress to predict a point on the X axis of the image (0-48000).
Initially, I tried [mobile_net -> GlobalAveragePooling2D -> several Dense layers]. I didn't realize Pooling was discarding the spatial information.
Last night I trained a simpler net; the loss decreased all night, but it's predicting negative values.
How can I modify this architecture to predict a 0-48000 scalar?
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu', input_shape=(256, 256, 3)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Conv2D(32, kernel_size=3, activation='relu'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1, kernel_initializer='normal'),
])
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae', 'mape'])
EDIT:
When inferring with my network, I'm getting vastly different outputs on each run for the SAME file. How is that possible?
Infer outputs, running multiple times on the same file:
-312864.9444580078
762.7029418945312
193352.7603149414
Here is the inference fn:
def infer(checkpoint_path):
    png_file = ['3023_28338_26_m.png', '3023_28338_26_m.png'][1]
    test_file = data_root + png_file
    onset = png_file.strip('_m.png.').split('_')[1]
    img = load_and_preprocess_from_path_label(test_file, 0)
    tst = np.expand_dims(img[0], axis=0)
    model = load_model_and_checkpoint(checkpoint_path)
    val = model.predict(tst)[0][0] * 48000
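Two common causes of run-to-run drift like this are a checkpoint that silently fails to restore (so each process predicts with fresh random weights) and a layer that is still running in training mode at inference. A minimal diagnostic sketch, reusing tst and model from the function above (everything else is an assumption):
a = model.predict(tst)[0][0]
b = model.predict(tst)[0][0]
# If these two values differ, something stochastic is still active at
# inference time; if they match here but differ across process runs,
# the checkpoint is most likely not being restored.
print(a, b, abs(a - b))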
Here is the final epoch of training.
2019-05-26 11:11:56.698907: I tensorflow/core/kernels/data/shuffle_dataset_op.cc:150] Shuffle buffer filled.
94/95 [============================>.] - ETA: 0s - loss: 0.0063 - mse: 0.0063 - mae: 0.0627 - mape: 93.2817
Epoch 00100: saving model to /media/caseybasichis/sp_data/sp_data/datasets/one_sec_onset_01/model7.ckpt
95/95 [==============================] - 47s 500ms/step - loss: 0.0063 - mse: 0.0063 - mae: 0.0626 - mape: 93.2076
Here is the latest network.
mobile_net = tf.keras.applications.ResNet50(input_shape=(256, 256, 3), include_top=False, weights='imagenet')
mobile_net.trainable = False
model = tf.keras.Sequential([
    mobile_net,
    tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, kernel_initializer='normal', activation='relu'),
    tf.keras.layers.BatchNormalization(axis=chanDim),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, kernel_initializer='normal', activation='linear'),  # activation='sigmoid'
])
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae', 'mape'])  # mean_squared_logarithmic_error

You can simply use a sigmoid activation on the last layer and multiply the output by the scale (in a Lambda layer, or preferably just scale the output outside the network):
model.add(Activation('sigmoid'))
model.add(Lambda(lambda x: 48000*x))
or
model.add(Activation('sigmoid'))
...
model.fit(x_train, y_train/48000.0)
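Putting this together with the asker's latest network, a minimal sketch might look like the following (the ResNet50 backbone, Flatten head, and 48000 scale come from the question; the rest is an assumption):
import tensorflow as tf

# Sketch: a bounded regression head. Sigmoid keeps the raw output in (0, 1),
# and a Lambda layer rescales it to the (0, 48000) target range.
backbone = tf.keras.applications.ResNet50(input_shape=(256, 256, 3),
                                          include_top=False, weights='imagenet')
backbone.trainable = False

model = tf.keras.Sequential([
    backbone,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, activation='sigmoid'),  # bounded to (0, 1)
    tf.keras.layers.Lambda(lambda x: 48000.0 * x),   # rescale to (0, 48000)
])
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
Equivalently, drop the Lambda layer, fit against y_train / 48000.0, and multiply predictions by 48000 at inference time, as the second snippet above does; that also keeps the loss on a well-scaled (0, 1) range.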

Related

Training CNN Model and accuracy stays at 1

I've been trying to train this CNN model. It's a TensorFlow tutorial and I just changed the dataset (I used the Fruits 360 dataset) without altering the core of the code. When it finishes training, the accuracy stays constant at 0.8565, and when I try to test some images it's almost always wrong.
What am I doing wrong?
[Image: code output after executing]
Here's the code I used:
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
import tarfile
import pathlib
from tensorflow import keras
from tensorflow.keras import layers, datasets, models
from tensorflow.keras.models import Sequential
dataset_url = "https://file.io/z5JM3sYAWXv4"
data_dir = tf.keras.utils.get_file(origin=dataset_url,
                                   fname='tomatos',
                                   untar=True,
                                   extract=True)
data_dir = pathlib.Path(data_dir)
print(data_dir)
file_count = sum(len(files) for _, _, files in os.walk(r'tomatos'))
print(file_count)
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
num_classes = len(class_names)
model = Sequential([
    layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
epochs = 2
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)
data_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal",
                          input_shape=(img_height, img_width, 3)),
        layers.RandomRotation(0.1),
        layers.RandomZoom(0.1),
    ]
)
model = Sequential([
    data_augmentation,
    layers.Rescaling(1./255),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(num_classes)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
epochs = 4
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)
sunflower_url = "https://puffycarrot.com/wp-content/uploads/2017/04/Green-tomatoes.jpg"
sunflower_path = tf.keras.utils.get_file('tomato2', origin=sunflower_url)
img = tf.keras.utils.load_img(
    sunflower_path, target_size=(img_height, img_width)
)
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)  # Create a batch
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
@Yaman Tarawneh, I tried replicating your above-mentioned code in Google Colab (using TF 2.8) and in PyCharm (using TF 2.7) and did not find the error. Both runs produced the same output:
Total params: 3,988,898
Trainable params: 3,988,898
Non-trainable params: 0
_________________________________________________________________
Epoch 1/4
78/78 [==============================] - 8s 41ms/step - loss: 0.0309 - accuracy: 0.9835 - val_loss: 5.6374e-07 - val_accuracy: 1.0000
Epoch 2/4
78/78 [==============================] - 2s 25ms/step - loss: 5.7533e-07 - accuracy: 1.0000 - val_loss: 2.7360e-07 - val_accuracy: 1.0000
Epoch 3/4
78/78 [==============================] - 2s 25ms/step - loss: 3.0400e-07 - accuracy: 1.0000 - val_loss: 1.3978e-07 - val_accuracy: 1.0000
Epoch 4/4
78/78 [==============================] - 2s 25ms/step - loss: 1.7403e-07 - accuracy: 1.0000 - val_loss: 7.2102e-08 - val_accuracy: 1.0000
This image most likely belongs to Tomato not Ripened with a 100.00 percent confidence.
For further analysis, if the issue still persists, please let us know which Python and TensorFlow versions you are using.
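If it helps, a quick sketch for reporting those versions:
import sys
import tensorflow as tf

# Print the interpreter and TensorFlow versions to include in the report.
print("Python:", sys.version)
print("TensorFlow:", tf.__version__)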

Keras - Trying to get 'logits' - one layer before the softmax activation function

I'm trying to get the 'logits' out of my Keras CNN classifier.
I have tried the suggested method here: link.
First I created two models to check the implementation:
1. create_CNN_MNIST - a CNN classifier that returns the softmax probabilities.
2. create_CNN_MNIST_logits - a CNN with the same layers as in (1), with a little twist in the last layer: the activation function is changed to linear so that it returns logits.
Both models were fed the same MNIST train and test data. Then I applied softmax to the logits, but got a different output than from the softmax CNN.
I couldn't find a problem in my code. Maybe you could advise another method to extract the 'logits' from the model?
The code:
def softmax(x):
    """Compute softmax values for each sets of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)

def create_CNN_MNIST_logits():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='linear'))
    # compile model
    opt = SGD(learning_rate=0.01, momentum=0.9)

    def my_sparse_categorical_crossentropy(y_true, y_pred):
        return keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)

    model.compile(optimizer=opt, loss=my_sparse_categorical_crossentropy, metrics=['accuracy'])
    return model

def create_CNN_MNIST():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(10, activation='softmax'))
    # compile model
    opt = SGD(learning_rate=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# load data
X_train = np.load('./data/X_train.npy')
X_test = np.load('./data/X_test.npy')
y_train = np.load('./data/y_train.npy')
y_test = np.load('./data/y_test.npy')

# create models
model_softmax = create_CNN_MNIST()
model_logits = create_CNN_MNIST_logits()

pixels = 28
channels = 1
num_labels = 10

# Reshaping to format which CNN expects (batch, height, width, channels)
trainX_cnn = X_train.reshape(X_train.shape[0], pixels, pixels, channels).astype('float32')
testX_cnn = X_test.reshape(X_test.shape[0], pixels, pixels, channels).astype('float32')

# Normalize images from 0-255 to 0-1
trainX_cnn /= 255
testX_cnn /= 255

train_y_cnn = utils.to_categorical(y_train, num_labels)
test_y_cnn = utils.to_categorical(y_test, num_labels)

# train the models:
model_logits.fit(trainX_cnn, train_y_cnn, validation_split=0.2, epochs=10,
                 batch_size=32)
model_softmax.fit(trainX_cnn, train_y_cnn, validation_split=0.2, epochs=10,
                  batch_size=32)
At the evaluation stage, I apply softmax to the logits to check whether the result is the same as from the regular model:
#predict
y_pred_softmax = model_softmax.predict(testX_cnn)
y_pred_logits = model_logits.predict(testX_cnn)
#apply softmax on the logits to get the same result of regular CNN
y_pred_logits_activated = softmax(y_pred_logits)
Now I get different values in y_pred_logits_activated and y_pred_softmax, which leads to different accuracies on the test set.
Your models are probably being trained differently; make sure to set the seed prior to both fit commands so that they're initialised with the same weights and see the same train/val split. Also, the softmax might be incorrect:
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x)
    # keepdims=True so the row sums broadcast against the [batch, outputs] matrix
    return e_x / e_x.sum(axis=1, keepdims=True)
Subtracting the max is mathematically equivalent and only improves numerical stability (https://stackoverflow.com/a/34969389/10475762); the important change is that the axis should be 1 if your matrix has shape [batch, outputs].
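Combining both points, a minimal sketch (assuming TF >= 2.7 for tf.keras.utils.set_random_seed; on older versions, set np.random.seed and tf.random.set_seed instead; the logits here are made up for illustration):
import numpy as np
import tensorflow as tf

# Seed everything before building/fitting each model so both start from
# the same initial weights and see the same train/val split.
tf.keras.utils.set_random_seed(42)

def stable_softmax(x):
    # Subtract the per-row max for numerical stability, then normalise
    # along axis=1, the class axis of a [batch, outputs] matrix.
    e_x = np.exp(x - x.max(axis=1, keepdims=True))
    return e_x / e_x.sum(axis=1, keepdims=True)

logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 0.5, 3.0]])
print(stable_softmax(logits).sum(axis=1))  # each row sums to 1.0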

TensorFlow 2 Quantization Aware Training (QAT) with tf.GradientTape

Can anyone point to references where one can learn how to perform Quantization Aware Training (QAT) with tf.GradientTape in TensorFlow 2?
I only see this done with the tf.keras API. I do not use tf.keras; I always build customized training loops with tf.GradientTape, since that provides more control over the training process. I now need to quantize a model, but I only see references on how to do it using the tf.keras API.
The official examples here show QAT training with model.fit. Below is a demonstration of Quantization Aware Training using tf.GradientTape(), but for complete reference, let's do both here.
Base model training. This is directly from the official doc. For more details, please check there.
import os
import tensorflow as tf
from tensorflow import keras
import tensorflow_model_optimization as tfmot

# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0

# Define the model architecture.
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(28, 28)),
    keras.layers.Reshape(target_shape=(28, 28, 1)),
    keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(10)
])

# Train the digit classification model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
model.fit(
    train_images,
    train_labels,
    epochs=1,
    validation_split=0.1,
)
10ms/step - loss: 0.5411 - accuracy: 0.8507 - val_loss: 0.1142 - val_accuracy: 0.9705
<tensorflow.python.keras.callbacks.History at 0x7f9ee970ab90>
QAT with .fit.
Now, performing QAT over the base model.
# ------------- Quantization Aware Training -------------
import tensorflow_model_optimization as tfmot

quantize_model = tfmot.quantization.keras.quantize_model

# q_aware stands for quantization aware.
q_aware_model = quantize_model(model)

# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
                      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                      metrics=['accuracy'])
q_aware_model.summary()

train_images_subset = train_images[0:1000]
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
                  batch_size=500, epochs=1, validation_split=0.1)
356ms/step - loss: 0.1431 - accuracy: 0.9629 - val_loss: 0.1626 - val_accuracy: 0.9500
<tensorflow.python.keras.callbacks.History at 0x7f9edf0aef90>
Checking performance
_, baseline_model_accuracy = model.evaluate(
    test_images, test_labels, verbose=0)
_, q_aware_model_accuracy = q_aware_model.evaluate(
    test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
print('Quant test accuracy:', q_aware_model_accuracy)
Baseline test accuracy: 0.9660999774932861
Quant test accuracy: 0.9660000205039978
QAT with tf.GradientTape().
Here is the QAT training part on the base model. Note we can also perform custom training over the base model.
batch_size = 500
train_dataset = tf.data.Dataset.from_tensor_slices((train_images_subset,
                                                    train_labels_subset))
train_dataset = train_dataset.batch(batch_size=batch_size,
                                    drop_remainder=False)

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()

for epoch in range(1):
    for x, y in train_dataset:
        with tf.GradientTape() as tape:
            preds = q_aware_model(x, training=True)
            loss = loss_fn(y, preds)
        grads = tape.gradient(loss, q_aware_model.trainable_variables)
        optimizer.apply_gradients(zip(grads, q_aware_model.trainable_variables))
_, baseline_model_accuracy = model.evaluate(
    test_images, test_labels, verbose=0)
_, q_aware_model_accuracy = q_aware_model.evaluate(
    test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
print('Quant test accuracy:', q_aware_model_accuracy)
Baseline test accuracy: 0.9660999774932861
Quant test accuracy: 0.9645000100135803
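Since the point of the question is avoiding the tf.keras fit/evaluate API, the final evaluation can also be done in a plain loop with a metric object. A minimal sketch, reusing q_aware_model and the test arrays from above:
# Custom evaluation loop: no model.evaluate, just a metric updated per batch.
acc = tf.keras.metrics.SparseCategoricalAccuracy()
test_dataset = tf.data.Dataset.from_tensor_slices(
    (test_images, test_labels)).batch(500)

for x, y in test_dataset:
    preds = q_aware_model(x, training=False)  # inference mode
    acc.update_state(y, preds)

print('Quant test accuracy (custom loop):', acc.result().numpy())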

Accuracy of Auto Encoder (Image Reconstruction) model in Keras

I am trying to make a simple autoencoder model for image reconstruction with the MNIST dataset.
If I run this model, it reports an accuracy:
60000/60000 [==============================] - 5s 83us/sample - loss: 0.0373 - accuracy: 0.2034 - val_loss: 0.0368 - val_accuracy: 0.217
But I am not sure how it's calculated, given that the prediction results are themselves images.
I dug down as far as the function "sparse_categorical_accuracy" but was not able to reach any conclusion about the formula for accuracy.
import tensorflow as tf
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model

latent_dim = 64

class Autoencoder(Model):
    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = tf.keras.Sequential([
            layers.Flatten(),
            layers.Dense(latent_dim, activation='relu'),
        ])
        self.decoder = tf.keras.Sequential([
            layers.Dense(784, activation='sigmoid'),
            layers.Reshape((28, 28))
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Autoencoder(latent_dim)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError(),
                    metrics=['accuracy'])
# x_train / x_test: MNIST images scaled to [0, 1] (loading elided in the question)
autoencoder.fit(x_train, x_train,
                epochs=10,
                shuffle=True,
                validation_data=(x_test, x_test))
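Worth noting while the question stands: 'accuracy' is an exact-match classification metric, so for reconstruction it is usually more informative to track error metrics directly. A minimal sketch, assuming x_test holds MNIST images normalised to [0, 1]:
import numpy as np

preds = autoencoder.predict(x_test)

# Per-pixel reconstruction errors over the whole test set.
mse = np.mean((preds - x_test) ** 2)
mae = np.mean(np.abs(preds - x_test))

# Peak signal-to-noise ratio, a common image-reconstruction metric;
# the data range is 1.0 for images scaled to [0, 1].
psnr = 10 * np.log10(1.0 / mse)

print(f"MSE: {mse:.4f}  MAE: {mae:.4f}  PSNR: {psnr:.2f} dB")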

Price Prediction via Neural Network (Keras)

I am trying to do call price prediction. My data set looks something like this:
call_category_id,duration,Number 1,Number 2,price
9,24,77348,70000,0.01
9,144,77348,70000,0.08
9,138,77348,70000,0.08
9,12,77348,70000,0.01
The dialled number is split into two numbers (Number 1, Number 2), as I think it will improve the prediction result. Usually the first few digits of a call dictate the price per minute.
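For illustration, a hypothetical sketch of that split (the 5/5 digit boundary is an assumption inferred from the sample rows above):
# Hypothetical: a 10-digit dialled number becomes a 5-digit routing prefix
# (Number 1, which mostly determines the rate) and the remainder (Number 2).
dialled = "7734870000"
number_1 = int(dialled[:5])  # 77348
number_2 = int(dialled[5:])  # 70000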
My model looks something like this:
def get_model():
    model = Sequential([
        Dense(40, activation='relu', kernel_initializer='uniform', input_shape=(4,)),
        Dropout(0.3),
        Dense(36, activation='relu', kernel_initializer='uniform'),
        Dropout(0.3),
        Dense(32, activation='relu', kernel_initializer='uniform'),
        Dropout(0.3),
        Dense(28, activation='relu', kernel_initializer='uniform'),
        Dropout(0.3),
        Dense(24, activation='relu', kernel_initializer='uniform'),
        Dense(32, activation='relu', kernel_initializer='uniform'),
        Dropout(0.3),
        Dense(20, activation='relu', kernel_initializer='uniform'),
        Dropout(0.3),
        Dense(1, activation='linear'),
    ])
    c_optimizers = optimizers.Adam()
    model.compile(optimizer=c_optimizers,
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model
model = get_model()  # assumed: the model is built from get_model() before fitting
model.fit(x_train,
          y_train,
          batch_size=1024,
          epochs=1000,
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=[tensor_board])
However, the challenge is that the accuracy never improves; it's stuck at 19.6%.
39879/39879 [==============================] - 0s 5us/step - loss: 0.1646 - acc: 0.1969 - val_loss: 0.1003 - val_acc: 0.2065
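A hedged aside on why that figure may never move: 'accuracy' counts exact matches between predictions and labels, which carries little signal for a continuous price target, so tracking regression metrics instead would at least make progress visible. A minimal sketch of the change (only the compile call inside get_model() differs):
from tensorflow.keras import optimizers

# Inside get_model(), compile with regression error metrics rather than
# 'accuracy' so that training progress on a continuous target is visible.
model.compile(optimizer=optimizers.Adam(),
              loss='mean_squared_error',
              metrics=['mae', 'mape'])  # mean absolute (percentage) error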