Reshape the input for a BatchDataset-trained model - tensorflow

I trained my TensorFlow model on images after converting them to a BatchDataset:
IMG_size = 224
INPUT_SHAPE = [None, IMG_size, IMG_size, 3]  # 4D input: (batch, height, width, channels)

model.build(INPUT_SHAPE)
model.compile(
    loss=tf.keras.losses.CategoricalCrossentropy(),
    optimizer=tf.keras.optimizers.Adam(),
    metrics=["accuracy"]
)
model.fit(x=train_data,
          epochs=EPOCHES,
          validation_data=test_data,
          validation_freq=1,  # check validation metrics every epoch
          callbacks=[tensorboard, early_stopping])
The type of train_data is:
tensorflow.python.data.ops.dataset_ops.BatchDataset.
I want to run my model on a single NumPy array or tensor constant, but that is a 3D input of shape TensorShape([224, 224, 3]), not the 4D input the model expects. How can I reshape it?

You can add a batch dimension to your image with tf.expand_dims:
newImage = tf.expand_dims(Original_Image, axis=0)
Then pass it to the predict function and it will work fine.
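For example, a minimal sketch (assuming model is the trained model from the question and the image has already been resized to 224×224×3; the random tensor just stands in for a real image):
import tensorflow as tf

# Stand-in for a single preprocessed image of shape (224, 224, 3).
single_image = tf.random.uniform([224, 224, 3])

# Add a leading batch dimension -> shape (1, 224, 224, 3).
batched_image = tf.expand_dims(single_image, axis=0)

# The model now receives the 4D shape it was built with.
predictions = model.predict(batched_image)
print(predictions.shape)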

Setting a target size makes all inputs the same shape, which helps with the input shape; alternatively you can expand the dimensions of a single image to create a batch: img_array = tf.expand_dims(image, 0).
Regarding your input INPUT_SHAPE = [None, IMG_size, IMG_size, 3] (a 4D shape): you can arrange the input images into an image training dataset and feed that into the model, for example:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
BATCH_SIZE = 16
IMG_SIZE = (160, 160)
PATH = 'F:\\datasets\\downloads\\sample\\cats_dogs\\training'
training_directory = os.path.join(PATH, 'train')
validation_directory = os.path.join(PATH, 'validation')
train_dataset = tf.keras.utils.image_dataset_from_directory(training_directory,
                                                            shuffle=True,
                                                            batch_size=BATCH_SIZE,
                                                            image_size=IMG_SIZE,
                                                            seed=42)
validation_dataset = tf.keras.utils.image_dataset_from_directory(validation_directory,
                                                                 shuffle=True,
                                                                 batch_size=BATCH_SIZE,
                                                                 image_size=IMG_SIZE,
                                                                 seed=42)
class_names = train_dataset.class_names
print( "class_names: " + str( class_names ) )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Model ( examine input layer )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
inputs = tf.keras.Input(shape=(160, 160, 3))
model = tf.keras.Model(inputs, outputs)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(train_dataset, epochs=initial_epochs, validation_data=validation_dataset)
...
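As a usage sketch tying this back to the question (model and validation_dataset are the ones defined above; the single-image pattern is a suggestion, not part of the original code), you can pull one image out of a batch and add the batch dimension before predicting:
# Take one batch from the dataset and predict on a single image from it.
for images, labels in validation_dataset.take(1):
    single_image = images[0]                   # shape (160, 160, 3)
    batched = tf.expand_dims(single_image, 0)  # shape (1, 160, 160, 3)
    probs = model.predict(batched)
    print(probs.shape)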

Related

Tensorflow keras, no gradients error when using MSE

I am trying the code from the TensorFlow guide "Writing a training loop from scratch" with some changes of my own. I changed the loss function from SparseCategoricalCrossentropy to MeanSquaredError, and I also changed the architecture of the model by adding a new Lambda layer for loss calculation. However, I get a ValueError saying that no gradients are provided for any variable. Is there any way to make the code run with MSE?
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
final_outputs = layers.Lambda(lambda x: tf.math.argmax(x, axis=-1))(outputs)
model = keras.Model(inputs=inputs, outputs=final_outputs)

# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.MeanSquaredError()

# Prepare the training dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))

# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)

epochs = 2
for epoch in range(epochs):
    print("\nStart of epoch %d" % (epoch,))
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train, training=True)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
The argmax op is not differentiable, so no gradients can flow back through the Lambda layer. To use integer labels with an MSE loss, convert your labels y_train and y_val to integers (if they are one-hot encoded):
y_train = np.argmax(y_train, axis=-1)
y_val = np.argmax(y_val, axis=-1)
and adjust the output layer so the model outputs a single value per example:
outputs = layers.Dense(1, name="predictions")(x2)
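A minimal sketch of the adjusted setup, assuming the rest of the training loop from the question stays the same (the reshape/cast of the integer labels for the MSE loss is my addition):
# Drop the non-differentiable argmax Lambda and regress a single value per example.
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(1, name="predictions")(x2)   # one float per example
model = keras.Model(inputs=inputs, outputs=outputs)

loss_fn = keras.losses.MeanSquaredError()

# Inside the training loop, compare the float prediction with the integer label.
with tf.GradientTape() as tape:
    preds = model(x_batch_train, training=True)                     # shape (batch, 1)
    targets = tf.cast(tf.reshape(y_batch_train, (-1, 1)), tf.float32)
    loss_value = loss_fn(targets, preds)
grads = tape.gradient(loss_value, model.trainable_weights)           # gradients now exist
optimizer.apply_gradients(zip(grads, model.trainable_weights))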

How to fit data for augmentation to avoid out of memory error?

I am doing augmentation for training a segmentation model, but the total number of images is about 26,000+. That is why I am running into problems when building an array of all the images.
Tried:
def get_data():
    X_data = []
    train_images = sorted(glob('../input/fg_image/images/*.jpg', recursive=True))
    size = 128, 128
    X_data = np.empty((len(train_images), 128, 128, 3), dtype=np.float32)
    for i, image in enumerate(train_images):
        X_data[i] = np.asarray(Image.open(image).thumbnail(size))
    return X_data

X_train = get_data()
By following the above method I am collecting X_train and Y_train. Up to this step, it works fine.
But when I then apply the method below for augmentation, the whole notebook crashes.
def augmentation(X_data, Y_data, validation_split=0.2, batch_size=32, seed=42):
    X_train, X_test, Y_train, Y_test = train_test_split(X_data,
                                                        Y_data,
                                                        train_size=1-validation_split,
                                                        test_size=validation_split,
                                                        random_state=seed)
    data_gen_args = dict(rotation_range=45.,
                         width_shift_range=0.1,
                         height_shift_range=0.1)
    X_datagen = ImageDataGenerator(**data_gen_args)
    Y_datagen = ImageDataGenerator(**data_gen_args)
    X_datagen.fit(X_train, augment=True, seed=seed)
    Y_datagen.fit(Y_train, augment=True, seed=seed)
    X_train_augmented = X_datagen.flow(X_train, batch_size=batch_size, shuffle=True, seed=seed)
    Y_train_augmented = Y_datagen.flow(Y_train, batch_size=batch_size, shuffle=True, seed=seed)
    train_generator = zip(X_train_augmented, Y_train_augmented)
    return train_generator

train_generator = augmentation(X_train, Y_train)
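For reference, ImageDataGenerator can also stream images from disk with flow_from_directory instead of building one 26,000-image array in memory; below is a minimal sketch of that pattern (the directory layout, the masks folder, and the sizes are assumptions):
from tensorflow.keras.preprocessing.image import ImageDataGenerator

data_gen_args = dict(rotation_range=45.,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     validation_split=0.2)

# Each directory is assumed to contain one subfolder holding the actual files.
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

seed = 42
image_generator = image_datagen.flow_from_directory('../input/fg_image/images',
                                                    class_mode=None,
                                                    target_size=(128, 128),
                                                    batch_size=32,
                                                    seed=seed,
                                                    subset='training')
mask_generator = mask_datagen.flow_from_directory('../input/fg_image/masks',
                                                  class_mode=None,
                                                  target_size=(128, 128),
                                                  batch_size=32,
                                                  seed=seed,
                                                  subset='training')
# The same seed keeps images and masks augmented in the same way.
train_generator = zip(image_generator, mask_generator)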

Tensorflow Keras Shape mismatch

While trying to implement a standard MNIST digit recognizer that many tutorials use to introduce you to neural networks, I'm encountering the error
ValueError: Shape mismatch: The shape of labels (received (1,)) should equal the shape of logits except for the last dimension (received (28, 10)).
I would like to use from_tensor_slices to process the data, since I want to apply the code to another problem where the data comes from a CSV file. Anyway, here is the code producing the error in the line model.fit(...)
import tensorflow as tf

train_dataset, test_dataset = tf.keras.datasets.mnist.load_data()
train_images, train_labels = train_dataset
train_images = train_images / 255.0

train_dataset_tensor = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
num_of_validation_data = 10000
validation_data = train_dataset_tensor.take(num_of_validation_data)
train_data = train_dataset_tensor.skip(num_of_validation_data)

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(100, activation='sigmoid'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy']
)
model.fit(train_data, batch_size=50, epochs=5)
performance = model.evaluate(validation_data)
I don't understand where the (28, 10) shape of the logits comes from; I thought I was flattening the image, essentially making a 1D vector out of the 2D image. How can I prevent the error?
The dataset built with from_tensor_slices yields individual examples, so Keras treats each 28×28 image as a batch of 28 rows, which is where the (28, 10) logits come from. Batching the dataset (for example with .batch(32)) fixes the shape mismatch. You can use the following code:
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]

train_ds = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(100, activation='sigmoid'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy']
)
model.fit(train_ds)
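As a follow-up usage note (mirroring the evaluate call in the question), the batched test dataset can be passed to evaluate in the same way, and epochs can be given to fit as before:
model.fit(train_ds, epochs=5)
performance = model.evaluate(test_ds)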

Keras: "ValueError: Error when checking target"

I am trying to build a model which will classify a video into a certain category.
For this I used a pretrained model, InceptionV3, and trained it on my own data. The training process completed successfully, but when I tried to classify a video I got the error:
ValueError: Error when checking : expected input_1 to have shape (None, None, None, 3) but got array with shape (1, 1, 104, 2048)
However, for prediction I used the same video as for the training process.
Defined model:
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    horizontal_flip=True,
    rotation_range=10.,
    width_shift_range=0.2,
    height_shift_range=0.2)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    os.path.join('data', 'train'),
    target_size=(299, 299),
    batch_size=32,
    classes=data.classes,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    os.path.join('data', 'test'),
    target_size=(299, 299),
    batch_size=32,
    classes=data.classes,
    class_mode='categorical')

base_model = InceptionV3(weights=weights, include_top=False)

# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer
predictions = Dense(len(data.classes), activation='softmax')(x)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    validation_data=validation_generator,
    validation_steps=10,
    epochs=nb_epoch,
    callbacks=callbacks)
Predictions:
# extract features from frames of video
files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in files:
    features = extractor_model.extract(f)
    sequence.append(features)
np.save(sequence_path, sequence)

sequences = np.load("data_final.npy")
# convert numpy array to 4 dimensions
sequences = np.expand_dims(sequences, axis=0)
sequences = np.expand_dims(sequences, axis=0)
prediction = model.predict(sequences)
Features extractor:
def extract(self, image_path):
    # print(image_path)
    img = image.load_img(image_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    # Get the prediction.
    features = self.model.predict(x)
    if self.weights is None:
        # For imagenet/default network:
        features = features[0]
    else:
        # For loaded network:
        features = features[0]
    return features
Keras complains that the shape is not None...
However, I expected to receive predictions from the model, but got this error instead. Please help. Thanks.
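Note that the model defined above expects raw 299×299×3 frames as input, while the prediction code feeds it the already-extracted 2048-dimensional features, which is where the (1, 1, 104, 2048) shape in the error comes from. A minimal sketch of predicting on raw frames instead (import paths assume tf.keras, the frame file names are placeholders, and the /255 rescale mirrors the training generators):
import numpy as np
from tensorflow.keras.preprocessing import image

# Placeholder list of frame files extracted from the video.
frame_paths = ['frame_000.jpg', 'frame_001.jpg']

frames = []
for path in frame_paths:
    img = image.load_img(path, target_size=(299, 299))
    frames.append(image.img_to_array(img) / 255.0)   # same rescale as training

batch = np.stack(frames)                   # shape (num_frames, 299, 299, 3)
frame_predictions = model.predict(batch)   # per-frame class probabilities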

Keras model failed to learn anything after changing to use tf.data api

I was trying to convert a simple Keras model to use the tf.data API for data loading, but somehow the accuracy stays at about 10% throughout all 10 epochs.
In comparison, the original code without the tf.data API easily achieves about 98% accuracy. Did I do anything wrong?
The version using tf.data api
import math
import tensorflow as tf
import numpy as np

batch_size = 32

def load_data():
    mnist = tf.keras.datasets.mnist
    (train_data, train_label), (validation_data, validation_label) = mnist.load_data()
    train_data, validation_data = train_data / 255.0, validation_data / 255.0
    train_label = train_label.astype(np.float32)
    return train_data, train_label

def build_model():
    class MyModel(tf.keras.Model):
        def __init__(self):
            super(MyModel, self).__init__(name='my_model')
            self.flatten = tf.keras.layers.Flatten()
            self.dense_1 = tf.keras.layers.Dense(512, activation=tf.nn.relu)
            self.dropout = tf.keras.layers.Dropout(0.2)
            self.dense_2 = tf.keras.layers.Dense(10, activation=tf.nn.softmax)

        def call(self, inputs):
            x = self.flatten(inputs)
            x = self.dense_1(x)
            x = self.dropout(x)
            y = self.dense_2(x)
            return y

    model = MyModel()
    model.compile(optimizer=tf.train.AdamOptimizer(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

train_data, train_label = load_data()
train_sample_count = len(train_data)
train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_label))
train_dataset = train_dataset.batch(batch_size)
train_dataset = train_dataset.repeat()

model = build_model()
model.fit(
    train_dataset,
    epochs=10,
    steps_per_epoch=math.ceil(train_sample_count/batch_size)
)
The version without using tf.data api
# load_data and build_model are exactly the same as those in the tf.data api version
train_data, train_label = load_data()
model = build_model()
model.fit(
    train_data,
    train_label,
    epochs=10
)