Why is my confusion matrix "shifted" to the right? - tensorflow

I built a classifier of 4 flower types based on ResNet 50. The accuracy is really high during training, and everything seems good. However, once I plot my confusion matrix, I see that the values are "shifted" to the right instead of lying on the main diagonal.
What does this mean? Is it a problem with my dataset, or my code?
Here's what I did to use ResNet 50:
def create_model(input_shape, top='flatten'):
    if top not in ('flatten', 'avg', 'max'):
        raise ValueError('unexpected top layer type: %s' % top)
    # connects base model with new "head"
    BottleneckLayer = {
        'flatten': Flatten(),
        'avg': GlobalAveragePooling2D(),
        'max': GlobalMaxPooling2D()
    }[top]
    base = InceptionResNetV2(input_shape=input_shape,
                             include_top=False,
                             weights='imagenet')
    x = BottleneckLayer(base.output)
    x = Dense(NUM_OF_FLOWERS, activation='linear')(x)
    model = Model(inputs=base.inputs, outputs=x)
    return model
base = ResNet50(input_shape=input_shape, include_top=False)
x = Flatten()(base.output)
x = Dense(NUM_OF_FLOWERS, activation='softmax')(x)
model = Model(inputs=base.inputs, outputs=x)
Confusion Matrix Generation:
# Predict the values from the validation dataset
Y_pred = model.predict_generator(validation_generator, nb_validation_samples // batch_size + 1)
# Convert prediction probabilities to class indices
Y_pred_classes = numpy.argmax(Y_pred, axis=1)
# Ground-truth class indices for the validation set
Y_true = validation_generator.classes
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes=range(4))
As requested, this is how I created the generators:
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    color_mode='rgb',
    class_mode='categorical',
    shuffle=True)
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    color_mode='rgb',
    class_mode='categorical',
    shuffle=False)
Here is an image album of my confusion matrix. Every time I execute model.predict(), the predictions change, always shifting one cell to the right.
Confusion Matrix Album

Yes, I imagine it is the code. Check your indexing where you create the confusion matrix; it will be off by one.

Look at the validation_generator. When you use data_generator.flow_from_directory, you need to make sure the shuffle parameter is set to False, as in the example below:
val_generator = val_data_generator.flow_from_directory(
    test_data_dir,
    target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
    batch_size=100,
    class_mode="binary",
    classes=['dog', 'cat'],
    shuffle=False)
because the default is True, and it then shuffles the order in which images are yielded, while generator.classes keeps the original (unshuffled) order.
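To convince yourself of this, here is a small sanity check (a sketch assuming the Keras DirectoryIterator API): with shuffle=False the labels yielded batch by batch should match val_generator.classes exactly, while with the default shuffle=True they won't.
import numpy as np

val_generator.reset()  # start from the first batch
batches = [next(val_generator) for _ in range(len(val_generator))]
yielded_labels = np.concatenate([y for _, y in batches])

# Holds with shuffle=False; fails with the default shuffle=True
assert np.array_equal(yielded_labels, val_generator.classes)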

This is an interesting problem. It can be fixed by recreating the ImageDataGenerator flow right before you call model.predict.
So:
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    color_mode='rgb',
    class_mode='categorical',
    shuffle=False)
Y_pred = model.predict_generator(validation_generator, nb_validation_samples // batch_size + 1)
# Convert prediction probabilities to class indices
Y_pred_classes = numpy.argmax(Y_pred, axis=1)
# Ground-truth class indices for the validation set
Y_true = validation_generator.classes
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes=range(4))
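A lighter-weight alternative, assuming the Keras DirectoryIterator API: the drift most likely happens because predict_generator is called with nb_validation_samples // batch_size + 1 steps, which over-reads the iterator and leaves it mid-epoch, so each subsequent predict starts one batch further along. Resetting the iterator before predicting avoids recreating it:
# Rewind the iterator to batch 0 so predictions line up with generator.classes
validation_generator.reset()
Y_pred = model.predict_generator(validation_generator,
                                 nb_validation_samples // batch_size + 1)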

Related

sklearn classification_report ValueError: Found input variables with inconsistent numbers of samples: [18, 576]

I'm working on a CNN classification problem. I used Keras and a pre-trained model. Now I want to evaluate my model and need the precision, recall and F1 score. When I use sklearn.metrics classification_report, I get the error above. I know where the numbers come from: the first is the length of my test dataset in batches and the second is the number of actual samples (predictions) in there. However, I don't know how to "convert" them.
See my code down below:
# load train_ds
train_ds = tf.keras.utils.image_dataset_from_directory(
    directory='/gdrive/My Drive/Flies_dt/224x224',
    image_size=(224, 224),
    validation_split=0.40,
    subset="training",
    seed=123,
    shuffle=True)
# load val_ds
val_ds = tf.keras.utils.image_dataset_from_directory(
    directory='/gdrive/My Drive/Flies_dt/224x224',
    image_size=(224, 224),
    validation_split=0.40,
    subset="validation",
    seed=123,
    shuffle=True)
# move some batches of val_ds to test_ds
test_ds = val_ds.take((1 * len(val_ds)) // 2)
print('test_ds =', len(test_ds))
val_ds = val_ds.skip((1 * len(val_ds)) // 2)
print('val_ds =', len(val_ds))  # test_ds = 18, val_ds = 18
# Load Model
base_model = keras.applications.vgg19.VGG19(
    include_top=False,
    weights='imagenet',
    input_shape=(224, 224, 3)
)
# Freeze base_model
base_model.trainable = False
#
inputs = keras.Input(shape=(224, 224, 3))
x = data_augmentation(inputs)  # apply data augmentation
# Preprocessing
x = tf.keras.applications.vgg19.preprocess_input(x)
# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer="Adam",
    metrics=['acc']
)
model.fit(train_ds, epochs=8, validation_data=val_ds, callbacks=[tensorboard_callback])
# Unfreeze the base_model. Note that it keeps running in inference mode
# since we passed `training=False` when calling it. This means that
# the batchnorm layers will not update their batch statistics.
# This prevents the batchnorm layers from undoing all the training
# we've done so far.
base_model.trainable = True
model.summary()
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.000001),  # Low learning rate
    loss="sparse_categorical_crossentropy",
    metrics=['acc']
)
model.fit(train_ds, epochs=5, validation_data=val_ds)
# Evaluate
from sklearn.metrics import classification_report
y_pred = model.predict(test_ds, batch_size=64, verbose=1)
y_pred_bool = np.argmax(y_pred, axis=1)
print(classification_report(test_ds, y_pred_bool))
I also tried something like this, but I'm not sure if this gives me the correct values for multiclass classification.
from keras import backend as K

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc', f1_m, precision_m, recall_m])
# fit the model
history = model.fit(Xtrain, ytrain, validation_split=0.3, epochs=10, verbose=0)
# evaluate the model
loss, accuracy, f1_score, precision, recall = model.evaluate(Xtest, ytest, verbose=0)
This is a lot, sorry. I hope somebody can help.
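A likely fix, sketched under the assumption that the goal is per-class metrics on test_ds: classification_report needs the integer labels, not the dataset object, so collect them from the batched tf.data.Dataset first. Since the dataset was loaded with shuffle=True, it is safest to rebuild it with shuffle=False before evaluating, so both passes see the batches in the same order.
import numpy as np
from sklearn.metrics import classification_report

# Gather the integer labels batch by batch from the tf.data.Dataset
y_true = np.concatenate([y.numpy() for _, y in test_ds], axis=0)

y_pred = model.predict(test_ds, verbose=1)
y_pred_bool = np.argmax(y_pred, axis=1)

print(classification_report(y_true, y_pred_bool))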

I am training a deepfake image detection model, but why the validation accuracy is not changing?

I am training a deepfake image detection model using TensorFlow, but the validation accuracy is stuck at 67%. I have tried different optimizers, but the score isn't improving; it just floats around the same value.
Here are the steps I took to create the model:
Import the data from the image folder.
Create an ImageDataGenerator object to do some augmentation:
datagen = ImageDataGenerator(
    horizontal_flip=True,
    validation_split=0.2,
    rescale=1./255,
)
Creating the model
image dimension: 299, 299, 3
input_layer = Input(shape=(image_dimensions['height'], image_dimensions['width'], image_dimensions['channels']))
base_model = keras.applications.EfficientNetB5(
    weights='imagenet',
    input_shape=(image_dimensions['height'], image_dimensions['width'], image_dimensions['channels']),
    include_top=False)
base_model.trainable = False
x = base_model(input_layer, training=False)
# Add pooling layer or flatten layer
y = GlobalAveragePooling2D()(x)
y = Dense(512, activation='relu')(y)
y = Dropout(0.4)(y)
y = Dense(256)(y)
# Add final dense layer
output_layer = Dense(1, activation='sigmoid')(y)
model = Model(inputs=input_layer, outputs=output_layer)
Training
efficientNet = EfficientNet(learning_rate=0.001)
efficientNet.summary()
history = efficientNet.fit(datagen.flow(X_train, y_train, batch_size=64, subset='training'),
                           epochs=10,
                           validation_data=datagen.flow(X_train, y_train, batch_size=64, subset='validation'))
Result
Here is the result of the model training
Is there any way I can fix this problem?
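One thing worth checking, assuming this uses tf.keras's keras.applications.EfficientNetB5: those models include input normalization internally and expect raw pixel values in the [0, 255] range, so the rescale=1./255 in the generator would feed them inputs far outside the expected distribution. A minimal change to test:
# EfficientNet (tf.keras) normalizes inputs itself; feed raw [0, 255] pixels
datagen = ImageDataGenerator(
    horizontal_flip=True,
    validation_split=0.2,
    # rescale removed: EfficientNet expects unscaled pixel values
)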

VGG 16 model training with tensorflow

I'm trying to use VGG16 from Keras to train a model for image classification.
Based on these articles (https://www.pyimagesearch.com/2019/06/03/fine-tuning-with-keras-and-deep-learning/ and https://learnopencv.com/keras-tutorial-fine-tuning-using-pre-trained-models/), I've added some additional Dense layers on top of the VGG16 model. However, the training accuracy after 20 epochs is around 35% to 41%, which doesn't match the results of these articles (above 90%).
Because of this, I would like to know whether I did something wrong in my code below.
Basic setting
url = '/content/drive/My Drive/fer2013.csv'
batch_size = 64
img_width, img_height = 48, 48
# 0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral
num_classes = 7
model_path = '/content/drive/My Drive/Af/cnn.h5'
df = pd.read_csv(url)

def _load_fer():
    # Load training and eval data
    df = pd.read_csv(url, sep=',')
    train_df = df[df['Usage'] == 'Training']
    eval_df = df[df['Usage'] == 'PublicTest']
    return train_df, eval_df

def _preprocess_fer(df, label_col='emotion', feature_col='pixels'):
    labels, features = df.loc[:, label_col].values.astype(np.int32), [
        np.fromstring(image, np.float32, sep=' ')
        for image in df.loc[:, feature_col].values]
    labels = [to_categorical(l, num_classes=num_classes) for l in labels]
    features = np.stack((features,) * 3, axis=-1)
    features /= 255
    features = features.reshape(features.shape[0], img_width, img_height, 3)
    return features, labels

# Load fer data
train_df, eval_df = _load_fer()
# preprocess fer data
x_train, y_train = _preprocess_fer(train_df)
x_valid, y_valid = _preprocess_fer(eval_df)
gen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
train_generator = gen.flow(x_train, y_train, batch_size=batch_size)
predict_size_train = int(np.math.ceil(len(x_train) / batch_size))
input_tensor = Input(shape=(img_width, img_height, 3))
Now comes the model training part
baseModel = VGG16(
    include_top=False, weights='imagenet',
    input_tensor=input_tensor
)
# Construct the head of the model that will be placed on top of the base model (fine tuning)
headModel = baseModel.output
headModel = Flatten()(headModel)
headModel = Dense(1024, activation="relu")(headModel)
# headModel = Dropout(0.5)(headModel)
headModel = BatchNormalization()(headModel)
headModel = Dense(num_classes, activation="softmax")(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)
for layer in baseModel.layers:
    layer.trainable = False
model summary
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(lr=0.001),
              metrics=['accuracy'])
history = model.fit(train_generator,
                    steps_per_epoch=predict_size_train * 1,
                    epochs=20,
                    validation_data=valid_generator,
                    validation_steps=predict_size_valid)
Result:
Result after training
I would be very thankful for your advice.
Best regards.
Since you are freezing all the base layers, a single dense layer might not give you the desired accuracy. Also, if you are not in a hurry, you can leave out the validation_steps and steps_per_epoch parameters. Note that the model in the tutorial you followed shows fluctuations, which you do not want.
I suggest:
for layer in baseModel.layers:
    layer.trainable = False

# layer name may be different; check with baseModel.summary()
base_out = baseModel.get_layer('block3_pool').output
With that you can get a specific layer's output. Once you have it, you can add some convolutions, and after the convolutions try stacking more dense layers, like:
x = tf.keras.layers.Flatten()(x)
x = Dense(512, activation= 'relu')(x)
x = Dropout(0.3)(x)
x = Dense(256, activation= 'relu')(x)
x = Dropout(0.2)(x)
output_model = Dense(num_classes, activation = 'softmax')(x)
If you don't want to add convolutions and would rather use baseModel as-is, that's also fine; however, you can do something like this:
# 12 is arbitrary; you can try different values. Not all layers are frozen this time.
for layer in baseModel.layers[:12]:
    layer.trainable = False

# check which layers are frozen
for i, layer in enumerate(baseModel.layers):
    print(i, layer.name, layer.trainable)
After that, you can try to set:
headModel = baseModel.output
headModel = Flatten()(headModel)
headModel = Dense(1024, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(512, activation="relu")(headModel)
headModel = Dense(num_classes, activation="softmax")(headModel)
If you see that your model is learning but the loss is fluctuating, you can reduce the learning rate. Or you can use the ReduceLROnPlateau callback:
rd_lr = ReduceLROnPlateau(monitor='val_loss', factor = np.sqrt(0.1), patience= 4, verbose = 1, min_lr = 5e-8)
The parameters are entirely up to your model. For more details, see the docs.
What form do the labels in y_train have? If they are integer values, you need to convert them to one-hot vectors with
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
since you are using loss='categorical_crossentropy' in model.compile. In addition, VGG16 expects its inputs to be run through its own preprocessing function (tf.keras.applications.vgg16.preprocess_input, which mean-centers the channels), so include
gen = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input, ...)
When you are training you have
for layer in baseModel.layers:
    layer.trainable = False
so you are only training the dense layers, which is OK but may not give you high accuracy. You might want to leave VGG16 trainable, though of course that will take longer. Or, after you train with VGG16 frozen, set it back to trainable and run a few more epochs to fine-tune the model.
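A minimal sketch of that two-phase schedule, reusing the names from the question (valid_generator and the epoch counts here are assumptions):
# Phase 1: train only the new head with the VGG16 base frozen
for layer in baseModel.layers:
    layer.trainable = False
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(1e-3),
              metrics=['accuracy'])
model.fit(train_generator, epochs=20, validation_data=valid_generator)

# Phase 2: unfreeze the base and fine-tune everything at a much lower rate
for layer in baseModel.layers:
    layer.trainable = True
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(1e-5),
              metrics=['accuracy'])
model.fit(train_generator, epochs=5, validation_data=valid_generator)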

Tensorflow: train multiple models in parallel with the same ImageDataGenerator

I'm doing HPO on a small custom CNN. During training the GPU is under-utilised, and I'm finding a bottleneck in the CPU: the data augmentation process is too slow. Looking online, I found that I could use multiple CPU cores for the generator to speed up the process. I set workers=n_cores and this did improve things, but not as much as I'd like.
So I thought I could train multiple models simultaneously on the GPU and feed the same augmented data to all of them. However, I can't come up with a way to do this, and I couldn't find any similar question.
Here's a minimal example (I'm leaving out imports for brevity):
# load model and set only last layer as trainable
def create_model(learning_rate, alpha, dropout):
    model_path = '/content/drive/My Drive/Progetto Advanced Machine Learning/Model Checkpoints/Custom Model 1 2020-06-01 10:56:21.010759.hdf5'
    model = tf.keras.models.load_model(model_path)
    x = model.layers[-2].output
    x = Dropout(dropout)(x)
    predictions = Dense(120, activation='softmax', name='prediction', kernel_regularizer=tf.keras.regularizers.l2(alpha))(x)
    model = Model(inputs=model.inputs, outputs=predictions)
    for layer in model.layers[:-2]:
        layer.trainable = False
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate), metrics=['accuracy'])
    return model
# declare the search space
SEARCH_SPACE = [skopt.space.Real(0.0001, 0.1, name='learning_rate', prior='log-uniform'),
                skopt.space.Real(1e-9, 1, name='alpha', prior='log-uniform'),
                skopt.space.Real(0.0001, 0.95, name='dropout', prior='log-uniform')]
# declare generator
train_datagenerator = ImageDataGenerator(rescale=1. / 255, rotation_range=30, zoom_range=0.2, horizontal_flip=True, validation_split=0.2, data_format='channels_last')
# training function to be called by the optimiser
@skopt.utils.use_named_args(SEARCH_SPACE)
def fitness(learning_rate, alpha, dropout):
    model = create_model(learning_rate, alpha, dropout)
    # compile generators
    train_batches = train_datagenerator.flow_from_directory(train_out_path, target_size=image_size, color_mode="rgb", class_mode="categorical", batch_size=32, subset='training', seed=20052020)
    val_batches = train_datagenerator.flow_from_directory(directory=train_out_path, target_size=image_size, color_mode="rgb", class_mode="categorical", batch_size=32, subset='validation', shuffle=False, seed=20052020)
    # train
    early_stopping = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
    training_results = model.fit(train_batches, epochs=5, verbose=1, shuffle=True, validation_data=val_batches, workers=2)
    history[hyperpars] = training_results.history
    with open(dict_save_path, 'wb') as f:
        pickle.dump(history, f)
    return training_results.history['val_accuracy'][-1]
# HPO
result = skopt.forest_minimize(fitness, SEARCH_SPACE, n_calls=10, callback=checkpoint_saver)
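One possible approach, sketched as a hypothesis rather than a tested solution: draw each augmented batch once from a shared generator and feed it to every model with train_on_batch, so the CPU augments each image only once per step. Here train_batches is assumed to be built as in fitness above, and param_sets is a hypothetical list of hyperparameter tuples to compare:
# Hypothetical sketch: several models sharing one augmented data stream
models = [create_model(lr, alpha, dropout)
          for (lr, alpha, dropout) in param_sets]  # param_sets assumed defined

for epoch in range(5):
    for _ in range(len(train_batches)):   # one pass over the training set
        x, y = next(train_batches)        # augment the batch once on the CPU
        for m in models:                  # reuse the same batch for every model
            m.train_on_batch(x, y)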

Keras split train test set when using ImageDataGenerator

I have a single directory which contains sub-folders (according to labels) of images. I want to split this data into train and test sets while using ImageDataGenerator in Keras. Although model.fit() in Keras has the argument validation_split for specifying the split, I could not find the same for model.fit_generator(). How can I do it?
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary')
model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    nb_epoch=nb_epoch,
    validation_data=??,
    nb_val_samples=nb_validation_samples)
I don't have a separate directory for validation data, so I need to split it from the training data.
Keras has now added a train/validation split from a single directory using ImageDataGenerator:
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   validation_split=0.2)  # set validation split
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary',
    subset='training')  # set as training data
validation_generator = train_datagen.flow_from_directory(
    train_data_dir,  # same directory as training data
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary',
    subset='validation')  # set as validation data
model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    epochs=nb_epochs)
https://keras.io/preprocessing/image/
For example, say you have a folder like this:
full_dataset
|--horse (40 images)
|--donkey (30 images)
|--cow (50 images)
|--zebra (70 images)
FIRST WAY
image_generator = ImageDataGenerator(rescale=1/255, validation_split=0.2)
train_dataset = image_generator.flow_from_directory(batch_size=32,
                                                    directory='full_dataset',
                                                    shuffle=True,
                                                    target_size=(280, 280),
                                                    subset="training",
                                                    class_mode='categorical')
validation_dataset = image_generator.flow_from_directory(batch_size=32,
                                                         directory='full_dataset',
                                                         shuffle=True,
                                                         target_size=(280, 280),
                                                         subset="validation",
                                                         class_mode='categorical')
SECOND WAY
import glob
horse = glob.glob('full_dataset/horse/*.*')
donkey = glob.glob('full_dataset/donkey/*.*')
cow = glob.glob('full_dataset/cow/*.*')
zebra = glob.glob('full_dataset/zebra/*.*')
data = []
labels = []
for i in horse:
    image = tf.keras.preprocessing.image.load_img(i, color_mode='rgb',
                                                  target_size=(280, 280))
    image = np.array(image)
    data.append(image)
    labels.append(0)
for i in donkey:
    image = tf.keras.preprocessing.image.load_img(i, color_mode='rgb',
                                                  target_size=(280, 280))
    image = np.array(image)
    data.append(image)
    labels.append(1)
for i in cow:
    image = tf.keras.preprocessing.image.load_img(i, color_mode='rgb',
                                                  target_size=(280, 280))
    image = np.array(image)
    data.append(image)
    labels.append(2)
for i in zebra:
    image = tf.keras.preprocessing.image.load_img(i, color_mode='rgb',
                                                  target_size=(280, 280))
    image = np.array(image)
    data.append(image)
    labels.append(3)
data = np.array(data)
labels = np.array(labels)
from sklearn.model_selection import train_test_split
X_train, X_test, ytrain, ytest = train_test_split(data, labels, test_size=0.2,
                                                  random_state=42)
The main drawback of the first way is that you can't use it to display a picture; validation_dataset[1] will raise an error. With the second way, X_test[1] works.
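For what it's worth, you can still pull images out of a generator for display by drawing a whole batch first; a small sketch using matplotlib (the generator name is from the first way above):
import matplotlib.pyplot as plt

# next() yields one batch of (images, labels); index into the batch to plot
x_batch, y_batch = next(validation_dataset)
plt.imshow(x_batch[0])  # images are already rescaled to [0, 1]
plt.show()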
With reference to this issue, https://github.com/keras-team/keras/issues/597, you can use the following code to split the whole set into train and val:
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=20,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   horizontal_flip=True,
                                   validation_split=0.2)  # val 20%
val_datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)
train_data = train_datagen.flow_from_directory(train_path,
                                               target_size=(224, 224),
                                               color_mode='rgb',
                                               batch_size=BS,
                                               class_mode='categorical',
                                               shuffle=True,
                                               subset='training')
val_data = val_datagen.flow_from_directory(train_path,
                                           target_size=(224, 224),
                                           color_mode='rgb',
                                           batch_size=BS,
                                           class_mode='categorical',
                                           shuffle=False,
                                           subset='validation')
If we use subset with a single ImageDataGenerator, the same augmentation will be applied to both training and validation. If you want to apply augmentation only to the training set, you can split the folders using the split-folders package, which can be installed directly with pip:
https://pypi.org/project/split-folders/
This will separate the dataset into train, val and test directories, and then you can create a separate generator for each of them.
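A minimal usage sketch (directory names are placeholders; the ratio here is 80/10/10):
# pip install split-folders
import splitfolders

# Copies images into output_dataset/train, output_dataset/val, output_dataset/test
splitfolders.ratio('full_dataset', output='output_dataset',
                   seed=42, ratio=(0.8, 0.1, 0.1))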
I have a PR for it. One way is to hash the filenames and do a variant assignment.
Example:
# -*- coding: utf-8 -*-
"""Train model using transfer learning."""
import os
import re
import glob
import hashlib
import argparse
import warnings
import six
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from keras.models import Model
from keras import backend as K
from keras.optimizers import SGD
from keras.layers import Dense, GlobalAveragePooling2D, Input
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing.image import (ImageDataGenerator, Iterator,
                                       array_to_img, img_to_array, load_img)
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
RANDOM_SEED = 0
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
VALID_IMAGE_FORMATS = frozenset(['jpg', 'jpeg', 'JPG', 'JPEG'])
# we chose to train the top 2 inception blocks
BATCH_SIZE = 100
TRAINABLE_LAYERS = 172
INCEPTIONV3_BASE_LAYERS = len(InceptionV3(weights=None, include_top=False).layers)
STEPS_PER_EPOCH = 625
VALIDATION_STEPS = 100
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
FC_LAYER_SIZE = 1024
# Helper: Save the model.
checkpointer = ModelCheckpoint(
    filepath='./output/checkpoints/inception.{epoch:03d}-{val_loss:.2f}.hdf5',
    verbose=1,
    save_best_only=True)
# Helper: Stop when we stop learning.
early_stopper = EarlyStopping(patience=10)
# Helper: TensorBoard
tensorboard = TensorBoard(log_dir='./output/')
def as_bytes(bytes_or_text, encoding='utf-8'):
    """Converts bytes or unicode to `bytes`, using utf-8 encoding for text.

    # Arguments
        bytes_or_text: A `bytes`, `str`, or `unicode` object.
        encoding: A string indicating the charset for encoding unicode.

    # Returns
        A `bytes` object.

    # Raises
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    if isinstance(bytes_or_text, six.text_type):
        return bytes_or_text.encode(encoding)
    elif isinstance(bytes_or_text, bytes):
        return bytes_or_text
    else:
        raise TypeError('Expected binary or unicode string, got %r' %
                        (bytes_or_text,))
class CustomImageDataGenerator(ImageDataGenerator):
    def flow_from_image_lists(self, image_lists,
                              category, image_dir,
                              target_size=(256, 256), color_mode='rgb',
                              class_mode='categorical',
                              batch_size=32, shuffle=True, seed=None,
                              save_to_dir=None,
                              save_prefix='',
                              save_format='jpeg'):
        return ImageListIterator(
            image_lists, self,
            category, image_dir,
            target_size=target_size, color_mode=color_mode,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format)
class ImageListIterator(Iterator):
    """Iterator capable of reading images from a directory on disk.

    # Arguments
        image_lists: Dictionary of training images for each label.
        image_data_generator: Instance of `ImageDataGenerator`
            to use for random transformations and normalization.
        target_size: tuple of integers, dimensions to resize input images to.
        color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
        classes: Optional list of strings, names of subdirectories
            containing images from each class (e.g. `["dogs", "cats"]`).
            It will be computed automatically if not set.
        class_mode: Mode for yielding the targets:
            `"binary"`: binary targets (if there are only two classes),
            `"categorical"`: categorical targets,
            `"sparse"`: integer targets,
            `None`: no targets get yielded (only input images are yielded).
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
        data_format: String, one of `channels_first`, `channels_last`.
        save_to_dir: Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix: String prefix to use for saving sample
            images (if `save_to_dir` is set).
        save_format: Format to use for saving sample images
            (if `save_to_dir` is set).
    """

    def __init__(self, image_lists, image_data_generator,
                 category, image_dir,
                 target_size=(256, 256), color_mode='rgb',
                 class_mode='categorical',
                 batch_size=32, shuffle=True, seed=None,
                 data_format=None,
                 save_to_dir=None, save_prefix='', save_format='jpeg'):
        if data_format is None:
            data_format = K.image_data_format()
        classes = list(image_lists.keys())
        self.category = category
        self.num_class = len(classes)
        self.image_lists = image_lists
        self.image_dir = image_dir
        how_many_files = 0
        for label_name in classes:
            for _ in self.image_lists[label_name][category]:
                how_many_files += 1
        self.samples = how_many_files
        self.class2id = dict(zip(classes, range(len(classes))))
        self.id2class = dict((v, k) for k, v in self.class2id.items())
        self.classes = np.zeros((self.samples,), dtype='int32')
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        if color_mode not in {'rgb', 'grayscale'}:
            raise ValueError('Invalid color mode:', color_mode,
                             '; expected "rgb" or "grayscale".')
        self.color_mode = color_mode
        self.data_format = data_format
        if self.color_mode == 'rgb':
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (3,)
            else:
                self.image_shape = (3,) + self.target_size
        else:
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (1,)
            else:
                self.image_shape = (1,) + self.target_size
        if class_mode not in {'categorical', 'binary', 'sparse', None}:
            raise ValueError('Invalid class_mode:', class_mode,
                             '; expected one of "categorical", '
                             '"binary", "sparse", or None.')
        self.class_mode = class_mode
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        i = 0
        self.filenames = []
        for label_name in classes:
            for j, _ in enumerate(self.image_lists[label_name][category]):
                self.classes[i] = self.class2id[label_name]
                img_path = get_image_path(self.image_lists,
                                          label_name,
                                          j,
                                          self.image_dir,
                                          self.category)
                self.filenames.append(img_path)
                i += 1
        print("Found {} {} files".format(len(self.filenames), category))
        super(ImageListIterator, self).__init__(self.samples, batch_size,
                                                shuffle, seed)

    def next(self):
        """For python 2.x.

        # Returns
            The next batch.
        """
        with self.lock:
            index_array, current_index, current_batch_size = next(
                self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        batch_x = np.zeros((current_batch_size,) + self.image_shape,
                           dtype=K.floatx())
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data
        for i, j in enumerate(index_array):
            img = load_img(self.filenames[j],
                           grayscale=grayscale,
                           target_size=self.target_size)
            x = img_to_array(img, data_format=self.data_format)
            x = self.image_data_generator.random_transform(x)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i in range(current_batch_size):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=current_index + i,
                    hash=np.random.randint(10000),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'sparse':
            batch_y = self.classes[index_array]
        elif self.class_mode == 'binary':
            batch_y = self.classes[index_array].astype(K.floatx())
        elif self.class_mode == 'categorical':
            batch_y = np.zeros((len(batch_x), self.num_class),
                               dtype=K.floatx())
            for i, label in enumerate(self.classes[index_array]):
                batch_y[i, label] = 1.
        else:
            return batch_x
        return batch_x, batch_y
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def create_image_lists(image_dir, validation_pct=10):
    """Builds a list of training images from the file system.

    Analyzes the sub folders in the image directory, splits them into stable
    training, testing, and validation sets, and returns a data structure
    describing the lists of images for each label and their paths.

    # Arguments
        image_dir: string path to a folder containing subfolders of images.
        validation_pct: integer percentage of images reserved for validation.

    # Returns
        dictionary of label subfolder, with images split into training
        and validation sets within each label.
    """
    if not os.path.isdir(image_dir):
        raise ValueError("Image directory {} not found.".format(image_dir))
    image_lists = {}
    sub_dirs = [x[0] for x in os.walk(image_dir)]
    sub_dirs_without_root = sub_dirs[1:]  # first element is root directory
    for sub_dir in sub_dirs_without_root:
        file_list = []
        dir_name = os.path.basename(sub_dir)
        if dir_name == image_dir:
            continue
        print("Looking for images in '{}'".format(dir_name))
        for extension in VALID_IMAGE_FORMATS:
            file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            warnings.warn('No files found')
            continue
        if len(file_list) < 20:
            warnings.warn('Folder has less than 20 images, which may cause '
                          'issues.')
        elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
            warnings.warn('WARNING: Folder {} has more than {} images. Some '
                          'images will never be selected.'
                          .format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
        label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
        training_images = []
        validation_images = []
        for file_name in file_list:
            base_name = os.path.basename(file_name)
            # Get the hash of the file name and perform variant assignment.
            hash_name = hashlib.sha1(as_bytes(base_name)).hexdigest()
            hash_pct = ((int(hash_name, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                        (100.0 / MAX_NUM_IMAGES_PER_CLASS))
            if hash_pct < validation_pct:
                validation_images.append(base_name)
            else:
                training_images.append(base_name)
        image_lists[label_name] = {
            'dir': dir_name,
            'training': training_images,
            'validation': validation_images,
        }
    return image_lists
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def get_image_path(image_lists, label_name, index, image_dir, category):
    """Returns a path to an image for a label at the given index.

    # Arguments
        image_lists: Dictionary of training images for each label.
        label_name: Label string we want to get an image for.
        index: Int offset of the image we want. This will be moduloed by the
            available number of images for the label, so it can be
            arbitrarily large.
        image_dir: Root folder string of the subfolders containing the
            training images.
        category: Name string of set to pull images from - training, testing,
            or validation.

    # Returns
        File system path string to an image that meets the requested
        parameters.
    """
    if label_name not in image_lists:
        raise ValueError('Label does not exist ', label_name)
    label_lists = image_lists[label_name]
    if category not in label_lists:
        raise ValueError('Category does not exist ', category)
    category_list = label_lists[category]
    if not category_list:
        raise ValueError('Label %s has no images in the category %s.',
                         label_name, category)
    mod_index = index % len(category_list)
    base_name = category_list[mod_index]
    sub_dir = label_lists['dir']
    full_path = os.path.join(image_dir, sub_dir, base_name)
    return full_path
def get_generators(image_lists, image_dir):
    train_datagen = CustomImageDataGenerator(rescale=1. / 255,
                                             horizontal_flip=True)
    test_datagen = CustomImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_image_lists(
        image_lists=image_lists,
        category='training',
        image_dir=image_dir,
        target_size=(MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH),
        batch_size=BATCH_SIZE,
        class_mode='categorical',
        seed=RANDOM_SEED)
    validation_generator = test_datagen.flow_from_image_lists(
        image_lists=image_lists,
        category='validation',
        image_dir=image_dir,
        target_size=(MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH),
        batch_size=BATCH_SIZE,
        class_mode='categorical',
        seed=RANDOM_SEED)
    return train_generator, validation_generator
def get_model(num_classes, weights='imagenet'):
    # create the base pre-trained model
    # , input_tensor=input_tensor
    base_model = InceptionV3(weights=weights, include_top=False)
    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(FC_LAYER_SIZE, activation='relu')(x)
    # and a logistic layer -- let's say we have 2 classes
    predictions = Dense(num_classes, activation='softmax')(x)
    # this is the model we will train
    model = Model(inputs=[base_model.input], outputs=[predictions])
    return model
def get_top_layer_model(model):
    """Used to train just the top layers of the model."""
    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in model.layers[:INCEPTIONV3_BASE_LAYERS]:
        layer.trainable = False
    for layer in model.layers[INCEPTIONV3_BASE_LAYERS:]:
        layer.trainable = True
    # compile the model (should be done after setting layers to non-trainable)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

def get_mid_layer_model(model):
    """After we fine-tune the dense layers, train deeper."""
    # freeze the first TRAINABLE_LAYERS layers and unfreeze the rest
    for layer in model.layers[:TRAINABLE_LAYERS]:
        layer.trainable = False
    for layer in model.layers[TRAINABLE_LAYERS:]:
        layer.trainable = True
    # we need to recompile the model for these modifications to take effect
    # we use SGD with a low learning rate
    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def train_model(model, epochs, generators, callbacks=None):
    train_generator, validation_generator = generators
    model.fit_generator(
        train_generator,
        steps_per_epoch=STEPS_PER_EPOCH,
        validation_data=validation_generator,
        validation_steps=VALIDATION_STEPS,
        epochs=epochs,
        callbacks=callbacks)
    return model

def main(image_dir, validation_pct):
    sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
    num_classes = len(sub_dirs) - 1
    print("Number of classes found: {}".format(num_classes))
    model = get_model(num_classes)
    print("Using validation percent of %{}".format(validation_pct))
    image_lists = create_image_lists(image_dir, validation_pct)
    generators = get_generators(image_lists, image_dir)
    # Get and train the top layers.
    model = get_top_layer_model(model)
    model = train_model(model, epochs=10, generators=generators)
    # Get and train the mid layers.
    model = get_mid_layer_model(model)
    _ = train_model(model, epochs=100, generators=generators,
                    callbacks=[checkpointer, early_stopper, tensorboard])
    # save model
    model.save('./output/model.hdf5', overwrite=True)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image-dir', required=True, help='data directory')
    parser.add_argument('--validation-pct', default=10, help='validation percentage')
    args = parser.parse_args()
    os.makedirs('./output/checkpoints/', exist_ok=True)
    main(**vars(args))
If you simply want to divide the dataset into training and validation sets (without any augmentation, etc.):
from tensorflow.keras.applications.xception import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator

ds_gen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    validation_split=0.2
)
train_ds = ds_gen.flow_from_directory(
    "/path/to/dataset",
    seed=1,
    target_size=(150, 150),  # adjust to your needs
    batch_size=32,  # adjust to your needs
    class_mode='categorical',
    subset='training'
)
val_ds = ds_gen.flow_from_directory(
    "/path/to/dataset",
    seed=1,
    target_size=(150, 150),
    batch_size=32,
    class_mode='categorical',
    subset='validation'
)
Here is the answer:
data_path = 'path/to/dir'
data_gen = ImageDataGenerator(rescale=1./255, validation_split=0.3)
train_data = data_gen.flow_from_directory(directory=data_path, target_size=img_size, batch_size=batch_size, subset='training', seed=42, class_mode='binary')
test_data = data_gen.flow_from_directory(directory=data_path, target_size=img_size, batch_size=batch_size, subset='validation', seed=42, class_mode='binary')
This is simple TensorFlow 2.0 code:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

def image_data_generator(data_dir,
                         data_augment=False,
                         batch_size=BATCH_SIZE,
                         target_size=(100, 100),
                         color_mode='rgb',
                         class_mode='binary',
                         shuffle=True):
    if data_augment:
        datagen = ImageDataGenerator(rescale=1./255,
                                     rotation_range=20,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     shear_range=0.2,
                                     zoom_range=0.2,
                                     validation_split=0.2,  # this is the trick
                                     horizontal_flip=True)
    else:
        datagen = ImageDataGenerator(rescale=1./255)
    generator = datagen.flow_from_directory(data_dir,
                                            target_size=target_size,
                                            color_mode=color_mode,
                                            batch_size=batch_size,
                                            shuffle=shuffle,
                                            class_mode=class_mode)
    return generator

train_generator = image_data_generator('Your_DataBase_Path', data_augment=True)
If you want to use the preprocessing function of the VGG16 model and split your dataset into 70% training and 30% validation, just follow this approach:
train_path = 'your dataset path'
train_batch = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input, validation_split=0.3) \
    .flow_from_directory(directory=train_path, target_size=(,), classes=['',''], batch_size=, class_mode='categorical', subset='training')
val_batch = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input, validation_split=0.3) \
    .flow_from_directory(directory=train_path, target_size=(,), classes=['',''], batch_size=, class_mode='categorical', subset='validation')
If you are using TensorFlow 2.x, you can also pass validation_split directly to the fit() function; note, however, that this only works for array inputs (e.g. NumPy data), not for generators.
I don't know if you are still interested, but I found the following workaround. The most important function is GetTrainValidTestGeneratorFromDir; the others are just used by it. The basic idea is that you first divide the ImageDataGenerator in two using validation_split. This gives you two iterators, and you can use the second one as the test iterator. You then divide the first one further, as follows:
First call flow_from_directory with the training subset (so you can be sure the test data are excluded). Then use the same generator to build a dataframe, and split that dataframe in two via flow_from_dataframe. You end up with three ImageDataIterators without changing the folders.
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 10:15:18 2021

@author: Alessandro
"""
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator

def ShuffleDataframe(thedataframe):
    thedataframe = thedataframe.sample(n=len(thedataframe), random_state=42)
    thedataframe = thedataframe.reset_index()
    thedataframe.drop('index', axis='columns', inplace=True)
    return thedataframe

def TransformGeneratorClassNumberToLabels(theGenerator, theLabelsNumbers):
    labelnames = theGenerator.class_indices
    labelnames = list(labelnames.keys())
    theLabelsString = [labelnames[i] for i in theLabelsNumbers]
    return theLabelsString

def GetGeneratorDataframe(theGenerator):
    training_filenames = theGenerator.filenames
    theLabelsNumbers = theGenerator.classes
    thelabelsString = TransformGeneratorClassNumberToLabels(theGenerator,
                                                            theLabelsNumbers)
    thedataframe = pd.DataFrame({'File': training_filenames,
                                 'Label': thelabelsString})
    thedataframe = ShuffleDataframe(thedataframe)
    return thedataframe

def GetTrainValidTestGeneratorFromDir(thedirectory,
                                      input_shape=(256, 256, 3),
                                      validation_split=0.1,
                                      rescaling=1./255):
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       validation_split=0.2)
    train_and_valid_generator = train_datagen.flow_from_directory(thedirectory,
                                                                  target_size=input_shape[0:2],
                                                                  batch_size=20,
                                                                  class_mode="categorical",
                                                                  subset='training',
                                                                  save_to_dir='checkdir')
    test_generator = train_datagen.flow_from_directory(thedirectory,
                                                       target_size=input_shape[0:2],
                                                       batch_size=20,
                                                       class_mode="categorical",
                                                       subset='validation')
    thedataframe = GetGeneratorDataframe(train_and_valid_generator)
    class_mode = "categorical"
    training_generator = train_datagen.flow_from_dataframe(dataframe=thedataframe,
                                                           directory=thedirectory,
                                                           target_size=input_shape[0:2],
                                                           x_col="File",
                                                           y_col="Label",
                                                           subset="training",
                                                           class_mode=class_mode)
    validation_generator = train_datagen.flow_from_dataframe(dataframe=thedataframe,
                                                             directory=thedirectory,
                                                             target_size=input_shape[0:2],
                                                             x_col="File",
                                                             y_col="Label",
                                                             subset="validation",
                                                             class_mode=class_mode)
    return training_generator, validation_generator, test_generator
input_shape = (450, 450, 3)
myDir = "MyFolder"
(training_generator,
 validation_generator,
 test_generator) = GetTrainValidTestGeneratorFromDir(myDir)

# next part is just to verify what you got
training_dataframe = GetGeneratorDataframe(training_generator)
valid_dataframe = GetGeneratorDataframe(validation_generator)
test_dataframe = GetGeneratorDataframe(test_generator)
Note that, starting from TF 2.9, ImageDataGenerator() has been deprecated in favour of tf.keras.utils.image_dataset_from_directory(), which achieves the same functionality. It is highly likely that the former will be removed altogether in upcoming TF versions.
Deprecated: tf.keras.preprocessing.image.ImageDataGenerator is not
recommended for new code. Prefer loading images with
tf.keras.utils.image_dataset_from_directory and transforming the
output tf.data.Dataset with preprocessing layers. For more
information, see the tutorials for loading images and augmenting
images, as well as the preprocessing layer guide.
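For reference, the equivalent train/validation split with the replacement API looks roughly like this (path, image size and batch size are placeholders):
import tensorflow as tf

train_ds = tf.keras.utils.image_dataset_from_directory(
    'path/to/dataset',
    validation_split=0.2,
    subset='training',
    seed=123,
    image_size=(150, 150),
    batch_size=32)
val_ds = tf.keras.utils.image_dataset_from_directory(
    'path/to/dataset',
    validation_split=0.2,
    subset='validation',
    seed=123,
    image_size=(150, 150),
    batch_size=32)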
In case you are looking for a train/validation split in generators for a segmentation task, you can use the following snippet:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

BATCH_SIZE = 16
val_fraction = 0.1

image_generator = ImageDataGenerator(rescale=1/255,
                                     brightness_range=[0.75, 1.75],
                                     validation_split=val_fraction)
mask_generator = ImageDataGenerator(validation_split=val_fraction)

train_image_generator = image_generator.flow_from_dataframe(df_img,
                                                            directory=image_dir,
                                                            x_col='image',
                                                            class_mode=None,
                                                            color_mode='rgb',
                                                            target_size=(INPUT_SIZE, INPUT_SIZE),
                                                            batch_size=BATCH_SIZE,
                                                            shuffle=True,
                                                            subset='training',
                                                            seed=1)
train_mask_generator = mask_generator.flow_from_dataframe(df_gt,
                                                          directory=gt_dir,
                                                          x_col='mask',
                                                          color_mode='grayscale',
                                                          class_mode=None,
                                                          target_size=(INPUT_SIZE, INPUT_SIZE),
                                                          batch_size=BATCH_SIZE,
                                                          shuffle=True,
                                                          subset='training',
                                                          seed=1)
validation_image_generator = image_generator.flow_from_dataframe(df_img,
                                                                 directory=image_dir,
                                                                 x_col='image',
                                                                 class_mode=None,
                                                                 color_mode='rgb',
                                                                 target_size=(INPUT_SIZE, INPUT_SIZE),
                                                                 batch_size=BATCH_SIZE,
                                                                 subset='validation',
                                                                 seed=1)
validation_mask_generator = mask_generator.flow_from_dataframe(df_gt,
                                                               directory=gt_dir,
                                                               x_col='mask',
                                                               color_mode='grayscale',
                                                               class_mode=None,
                                                               target_size=(INPUT_SIZE, INPUT_SIZE),
                                                               batch_size=BATCH_SIZE,
                                                               subset='validation',
                                                               seed=1)

train_generator = zip(train_image_generator, train_mask_generator)
validation_generator = zip(validation_image_generator, validation_mask_generator)
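Fitting would then look something like this (epoch count illustrative); since zip produces a plain generator without a length, the step counts have to be passed explicitly:
model.fit(train_generator,
          steps_per_epoch=len(train_image_generator),
          validation_data=validation_generator,
          validation_steps=len(validation_image_generator),
          epochs=10)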