I am trying to train a deep learning model with tf.keras. I have 67 classes of images inside the image directory, such as airport, bookstore, and casino, and for each class I have at least 100 images. The data is from the MIT Indoor Scene dataset. But when I try to train the model, I constantly get this error.
tensorflow.python.framework.errors_impl.InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: Input size should match (header_size + row_size * abs_height) but they differ by 2
[[{{node decode_image/DecodeImage}}]]
[[IteratorGetNext]]
(1) Invalid argument: Input size should match (header_size + row_size * abs_height) but they differ by 2
[[{{node decode_image/DecodeImage}}]]
[[IteratorGetNext]]
[[IteratorGetNext/_7]]
0 successful operations.
0 derived errors ignored. [Op:__inference_train_function_1570]
Function call stack:
train_function -> train_function
I tried to resolve the problem by resizing the images with a resizing layer. I also included labels='inferred' and label_mode='categorical' in the image_dataset_from_directory method and used loss='categorical_crossentropy' in the model compile method. Previously, labels and label_mode were not set and the loss was sparse_categorical_crossentropy, which I think was not right, so I changed them as described above. But I am still having problems.
There is one related question on Stack Overflow, but the person did not mention how they solved the problem; they only updated it with "My suggestion is to check the metadata of the dataset. It helped to fix my problem." They did not mention what metadata to look for or what they did to solve the problem.
The code that I am using to train the model -
import os
import PIL
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.layers import Flatten, Dropout, BatchNormalization, Rescaling
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.regularizers import l1, l2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from pathlib import Path
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# define directory paths
PROJECT_PATH = Path.cwd()
DATA_PATH = PROJECT_PATH.joinpath('data', 'Images')
# create a dataset
batch_size = 32
img_height = 180
img_width = 180
train = tf.keras.utils.image_dataset_from_directory(
DATA_PATH,
validation_split=0.2,
subset="training",
labels="inferred",
label_mode="categorical",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
valid = tf.keras.utils.image_dataset_from_directory(
DATA_PATH,
validation_split=0.2,
subset="validation",
labels="inferred",
label_mode="categorical",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
class_names = train.class_names
for image_batch, label_batch in train.take(1):
print("\nImage shape:", image_batch.shape)
print("Label Shape", label_batch.shape)
# resize image
resize_layer = tf.keras.layers.Resizing(img_height, img_width)
train = train.map(lambda x, y: (resize_layer(x), y))
valid = valid.map(lambda x, y: (resize_layer(x), y))
# standardize the data
normalization_layer = tf.keras.layers.Rescaling(1./255)
train = train.map(lambda x, y: (normalization_layer(x), y))
valid = valid.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(train))
first_image = image_batch[0]
print("\nImage (min, max) value:", (np.min(first_image), np.max(first_image)))
print()
# configure the dataset for performance
AUTOTUNE = tf.data.AUTOTUNE
train = train.cache().prefetch(buffer_size=AUTOTUNE)
valid = valid.cache().prefetch(buffer_size=AUTOTUNE)
# create a basic model architecture
num_classes = len(class_names)
# initiate a sequential model
model = Sequential()
# CONV1
model.add(Conv2D(filters=64, kernel_size=3, activation="relu",
input_shape=(img_height, img_width, 3)))
model.add(BatchNormalization())
# CONV2
model.add(Conv2D(filters=64, kernel_size=3,
activation="relu"))
model.add(BatchNormalization())
# Pool + Dropout
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
# CONV3
model.add(Conv2D(filters=128, kernel_size=3,
activation="relu"))
model.add(BatchNormalization())
# CONV4
model.add(Conv2D(filters=128, kernel_size=3,
activation="relu"))
model.add(BatchNormalization())
# POOL + Dropout
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
# FC5
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(num_classes, activation="softmax"))
# compile the model
model.compile(loss="categorical_crossentropy",
optimizer="adam", metrics=['accuracy'])
# train the model
epochs = 25
early_stopping_cb = EarlyStopping(patience=10, restore_best_weights=True)
history = model.fit(train, validation_data=valid, epochs=epochs,
callbacks=[early_stopping_cb], verbose=2)
result = pd.DataFrame(history.history)
print()
print(result.head())
Note -
I just modified the code to make it as simple as possible to reduce the error. The model ran for a few batches and then hit the above error again.
Epoch 1/10
732/781 [===========================>..] - ETA: 22s - loss: 3.7882
Traceback (most recent call last):
File ".\02_model1.py", line 139, in <module>
model.fit(train, epochs=10, validation_data=valid)
File "C:\Users\BHOLA\anaconda3\lib\site-packages\keras\engine\training.py", line 1184, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\BHOLA\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py", line 885, in __call__
result = self._call(*args, **kwds)
File "C:\Users\BHOLA\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py", line 917, in _call
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
File "C:\Users\BHOLA\anaconda3\lib\site-packages\tensorflow\python\eager\function.py", line 3039, in __call__
return graph_function._call_flat(
File "C:\Users\BHOLA\anaconda3\lib\site-packages\tensorflow\python\eager\function.py", line 1963, in _call_flat
return self._build_call_outputs(self._inference_function.call(
File "C:\Users\BHOLA\anaconda3\lib\site-packages\tensorflow\python\eager\function.py", line 591, in call
outputs = execute.execute(
File "C:\Users\BHOLA\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 59, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: Input size should match (header_size + row_size * abs_height) but they differ by 2
[[{{node decode_image/DecodeImage}}]]
[[IteratorGetNext]]
(1) Invalid argument: Input size should match (header_size + row_size * abs_height) but they differ by 2
[[{{node decode_image/DecodeImage}}]]
[[IteratorGetNext]]
[[IteratorGetNext/_2]]
0 successful operations.
0 derived errors ignored. [Op:__inference_train_function_11840]
Function call stack:
train_function -> train_function
Modified code -
# create a dataset
batch_size = 16
img_height = 256
img_width = 256
train = image_dataset_from_directory(
DATA_PATH,
validation_split=0.2,
subset="training",
labels="inferred",
label_mode="categorical",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
valid = image_dataset_from_directory(
DATA_PATH,
validation_split=0.2,
subset="validation",
labels="inferred",
label_mode="categorical",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
model = tf.keras.applications.Xception(
weights=None, input_shape=(img_height, img_width, 3), classes=67)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit(train, epochs=10, validation_data=valid)
I think it might be a corrupted file. It is throwing an exception after a data integrity check in the DecodeBMPv2 function (https://github.com/tensorflow/tensorflow/blob/0b6b491d21d6a4eb5fbab1cca565bc1e94ca9543/tensorflow/core/kernels/image/decode_image_op.cc#L594)
If that's the issue and you want to find out which file(s) are throwing the exception, you can try something like this below on the directory containing the files. Remove/replace any files you find and it should train normally.
import os
import glob
import tensorflow as tf
img_paths = glob.glob(os.path.join(<path_to_dataset>, '*/*.*'))  # assuming you point to the directory containing the label folders
bad_paths = []
for image_path in img_paths:
    try:
        img_bytes = tf.io.read_file(image_path)
        decoded_img = tf.io.decode_image(img_bytes)
    except tf.errors.InvalidArgumentError as e:
        print(f"Found bad path {image_path}...{e}")
        bad_paths.append(image_path)
    else:
        print(f"{image_path}: OK")
print("BAD PATHS:")
for bad_path in bad_paths:
    print(bad_path)
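A complementary check can be done with Pillow alone. Note that Pillow opens the misnamed BMP files without complaint, so a plain open/verify pass would miss exactly the files that trip up TensorFlow; comparing the format Pillow detects against the file extension does catch them. A minimal sketch, assuming the same directory layout:
import os
import glob
from PIL import Image
for image_path in glob.glob(os.path.join('<path_to_dataset>', '*/*.jpg')):
    try:
        with Image.open(image_path) as img:
            fmt = img.format  # the format Pillow actually detected, e.g. 'JPEG' or 'BMP'
    except OSError as e:
        print(f"Unreadable file {image_path}...{e}")
        continue
    if fmt != 'JPEG':
        print(f"{image_path} has a .jpg extension but is actually {fmt}")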
This is in fact a corrupted file problem. However, the underlying issue is far more subtle. Here is an explanation of what is going on and how to circumvent this obstacle. I encountered the very same problem on the very same MIT Indoor Scene Classification dataset. All the images are JPEG files (spoiler alert: well, are they?).
It has been correctly noted that the exception is raised exactly here, in a C++ file related to the tf.io.decode_image() function. The issue lies in the decode_image() function, which is called by tf.keras.utils.image_dataset_from_directory().
On the other hand, tf.keras.preprocessing.image.ImageDataGenerator().flow_from_directory() relies on Pillow under the hood (shown here, which is called from here). This is the reason why adopting the ImageDataGenerator class works.
After closer inspection of the corresponding C++ source file, one can observe that the function is actually called DecodeBmpV2(...), as defined here. This raises the question of why a JPEG image is being treated as a BMP one. The aforementioned function is actually called here, as part of a basic switch statement whose aim is to direct further data conversion according to the determined file type. Thus, the piece of code that determines the file type deserves deeper analysis. The file type is determined from the values of the starting bytes (see here). Long story short, a simple comparison of the so-called magic bytes that signify the file type is performed.
Here is a code extract with the corresponding magic bytes.
static const char kPngMagicBytes[] = "\x89\x50\x4E\x47\x0D\x0A\x1A\x0A";
static const char kGifMagicBytes[] = "\x47\x49\x46\x38";
static const char kBmpMagicBytes[] = "\x42\x4d";
static const char kJpegMagicBytes[] = "\xff\xd8\xff";
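These signatures can be checked from Python as well. Here is a minimal sketch that dumps the first bytes of each file and flags .jpg files whose header matches the BMP signature ('<path_to_dataset>' is a placeholder):
import os
import glob
JPEG_MAGIC = b'\xff\xd8\xff'
BMP_MAGIC = b'\x42\x4d'  # ASCII 'BM'
for image_path in glob.glob(os.path.join('<path_to_dataset>', '*/*.jpg')):
    with open(image_path, 'rb') as f:
        header = f.read(10)
    print(image_path, header.hex())
    if header.startswith(BMP_MAGIC):
        print(f"  -> {image_path} starts with BMP magic bytes, not JPEG")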
After identifying which files raise the exception, I saw that they were supposed to be JPEG files, however, their starting bytes indicated a BMP format instead.
Here is an example of 3 files and their first 10 bytes.
laundromat\laundry_room_area.jpg
b'ffd8ffe000104a464946'
laundromat\Laundry_Room_Edens1A.jpg
b'ffd8ffe000104a464946'
laundromat\Laundry_Room_bmp.jpg
b'424d3800030000000000'
Look at the last one: it even contains the word bmp in the file name. Why is that so? I do not know. The dataset does contain corrupted image files; someone probably converted the file from BMP to JPEG, yet the tool used did not work correctly. We can only guess at the real reason, but that is now irrelevant.
The method by which TensorFlow determines the file type differs from the one used by the Pillow package, so there is nothing we can do about it on the TensorFlow side. The recommendation is to identify the corrupted files, which is actually easy, or to rely on the ImageDataGenerator class. However, I would advise against the latter, as that class has been marked as deprecated. This is not a bug in the code per se, but rather bad data inadvertently introduced into the dataset.
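If you would rather repair the offending files than remove them, one option is to re-save them with Pillow, which decodes the misnamed BMPs just fine. A minimal sketch, assuming bad_paths was collected by one of the detection passes above:
from PIL import Image
for image_path in bad_paths:
    with Image.open(image_path) as img:
        rgb = img.convert('RGB')
    rgb.save(image_path, format='JPEG')  # overwrite with an actual JPEG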
I am having trouble finding a way to create a dataset in tensorflow from images. My dataset has the structure below:
fruit-data
|
|-train
| |
| |- Freshapple -> .png images of fresh apples
| |- Freshorange -> .png images of fresh oranges
| |- Freshbanana -> .png images of fresh bananas
|
|-test
| |
| |- Rottenapple -> .png images of rotten apples
| |- Rottenorange -> .png images of rotten oranges
| |- Rottenbanana -> .png images of rotten bananas
|
I have my paths set as so and the classes set:
train_path = ".../Desktop/Data/fruit-dataset/train"
test_path = ".../Desktop/Data/fruit-dataset/train"
categories = ["freshapple", "freshorange", "freshbanana",
"rottenapple", "rottenorange", "rottenbanana"]
From other resources I've seen, because my dataset contains over 13k images, I would need to use flow_from_directory(), since loading everything into memory would cause a crash at runtime.
I'm confused on what the next steps are to get this dataset loaded in.
For other information, I will be using a tuned MobilenetV2 model. (experimenting with freezing layers)
There are a number of ways to load the data. I prefer to use pandas dataframes because it is easy to partition the data in various ways. The code below should be what you need.
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
sdir = r'.../Desktop/Data/fruit-dataset'
categories=['train', 'test']
for category in categories:
catpath=os.path.join(sdir, category)
classlist=os.listdir(catpath)
filepaths=[]
labels=[]
for klass in classlist:
classpath=os.path.join(catpath,klass)
flist=os.listdir(classpath)
for f in flist:
fpath=os.path.join(classpath,f)
filepaths.append(fpath)
labels.append(klass)
Fseries=pd.Series(filepaths, name='filepaths')
Lseries=pd.Series(labels, name='labels')
if category=='train':
df=pd.concat([Fseries, Lseries], axis=1)
else:
test_df=pd.concat([Fseries, Lseries], axis=1)
# create a validation data set
train_df, valid_df=train_test_split(df, train_size=.8, shuffle=True, random_state=123)
print('train_df length: ', len(train_df), ' test_df length: ',len(test_df), ' valid_df length: ', len(valid_df))
balance=list(train_df['labels'].value_counts())
# check the balance of the training set
for b in balance:
print (b)
height=224
width=224
channels=3
batch_size=40
img_shape=(height, width, channels)
img_size=(height, width)
length=len(test_df)
test_batch_size=sorted([int(length/n) for n in range(1,length+1) if length % n ==0 and length/n<=80],reverse=True)[0]
test_steps=int(length/test_batch_size)
print ( 'test batch size: ' ,test_batch_size, ' test steps: ', test_steps)
def scalar(img):
img=img/255
return img
trgen=ImageDataGenerator(preprocessing_function=scalar, horizontal_flip=True)
tvgen=ImageDataGenerator(preprocessing_function=scalar)
train_gen=trgen.flow_from_dataframe( train_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',
color_mode='rgb', shuffle=True, batch_size=batch_size)
test_gen=tvgen.flow_from_dataframe( test_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',
color_mode='rgb', shuffle=False, batch_size=test_batch_size)
valid_gen=tvgen.flow_from_dataframe( valid_df, x_col='filepaths', y_col='labels', target_size=img_size, class_mode='categorical',
color_mode='rgb', shuffle=True, batch_size=batch_size)
classes=list(train_gen.class_indices.keys())
class_count=len(classes)
history=model.fit(x=train_gen, epochs=20, verbose=2, validation_data=valid_gen,
validation_steps=None, shuffle=False, initial_epoch=0)
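Once training finishes, the test generator and the precomputed step count can be used for evaluation. A minimal sketch, assuming your model was compiled with an accuracy metric:
# shuffle=False in test_gen keeps predictions aligned with the rows of test_df
loss, acc = model.evaluate(test_gen, steps=test_steps, verbose=1)
print('test loss:', loss, ' test accuracy:', acc)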
Or a simpler, but less versatile, way is with flow_from_directory:
gen=tf.keras.preprocessing.image.ImageDataGenerator( rescale=1/255,
validation_split=0.1)
tgen=tf.keras.preprocessing.image.ImageDataGenerator( rescale=1/255)
train_dir=r'.../Desktop/Data/fruit-dataset/train'
train_gen=gen.flow_from_directory(train_dir, target_size=(256, 256),
class_mode="categorical", batch_size=32, shuffle=True,
seed=123, subset='training')
valid_gen=gen.flow_from_directory(train_dir, target_size=(256, 256),
class_mode="categorical", batch_size=32, shuffle=True,
seed=123, subset='validation')
test_dir=r'.../Desktop/Data/fruit-dataset/test' # you had this wrong in your code
test_gen=tgen.flow_from_directory(test_dir, target_size=(256, 256),
class_mode="categorical", batch_size=32, shuffle=False)
history=model.fit(x=train_gen, epochs=20, verbose=2, validation_data=valid_gen,
validation_steps=None, shuffle=False, initial_epoch=0)
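Since you mentioned a tuned MobileNetV2 with frozen layers, here is a minimal sketch of how such a model could be wired to the generators above; the head size and learning rate are assumptions, not part of the original answer:
import tensorflow as tf
base = tf.keras.applications.MobileNetV2(input_shape=(256, 256, 3),
                                         include_top=False, weights='imagenet')
base.trainable = False  # freeze the base; unfreeze selected layers later to fine-tune
model = tf.keras.Sequential([
    base,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(6, activation='softmax')  # 6 fruit classes
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss='categorical_crossentropy', metrics=['accuracy'])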
I have a single directory which contains sub-folders (according to labels) of images. I want to split this data into train and test sets while using ImageDataGenerator in Keras. Although model.fit() in Keras has the argument validation_split for specifying the split, I could not find the same for model.fit_generator(). How do I do it?
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
nb_epoch=nb_epoch,
validation_data=??,
nb_val_samples=nb_validation_samples)
I don't have separate directory for validation data, need to split it from the training data
Keras has now added a train/validation split from a single directory using ImageDataGenerator:
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.2) # set validation split
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary',
subset='training') # set as training data
validation_generator = train_datagen.flow_from_directory(
train_data_dir, # same directory as training data
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary',
subset='validation') # set as validation data
model.fit_generator(
train_generator,
steps_per_epoch = train_generator.samples // batch_size,
validation_data = validation_generator,
validation_steps = validation_generator.samples // batch_size,
epochs = nb_epochs)
https://keras.io/preprocessing/image/
For example, you have folder like this
full_dataset
|--horse (40 images)
|--donkey (30 images)
|--cow (50 images)
|--zebra (70 images)
FIRST WAY
image_generator = ImageDataGenerator(rescale=1/255, validation_split=0.2)
train_dataset = image_generator.flow_from_directory(batch_size=32,
directory='full_dataset',
shuffle=True,
target_size=(280, 280),
subset="training",
class_mode='categorical')
validation_dataset = image_generator.flow_from_directory(batch_size=32,
directory='full_dataset',
shuffle=True,
target_size=(280, 280),
subset="validation",
class_mode='categorical')
SECOND WAY
import glob
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
classes = ['horse', 'donkey', 'cow', 'zebra']
data = []
labels = []
for label, klass in enumerate(classes):
    for i in glob.glob('full_dataset/{}/*.*'.format(klass)):
        image = tf.keras.preprocessing.image.load_img(i, color_mode='rgb',
                                                      target_size=(280, 280))
        data.append(np.array(image))
        labels.append(label)  # horse=0, donkey=1, cow=2, zebra=3
data = np.array(data)
labels = np.array(labels)
X_train, X_test, ytrain, ytest = train_test_split(data, labels, test_size=0.2,
                                                  random_state=42)
The main drawback of the first way is that you can't use it to display a picture; it will error if you write validation_dataset[1]. But it works with the second way: X_test[1].
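If you do want to inspect images coming out of the first way, pull a whole batch from the iterator instead of indexing a single sample. A minimal sketch:
import matplotlib.pyplot as plt
images, labels = next(validation_dataset)  # one batch, shape (32, 280, 280, 3)
plt.imshow(images[0])
plt.title(str(labels[0]))  # one-hot label of the first image
plt.show()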
With reference to this question https://github.com/keras-team/keras/issues/597, you can use the following code to split the whole set into train and validation sets:
train_datagen = ImageDataGenerator(rescale=1./255,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
validation_split=0.2) # val 20%
val_datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)
train_data = train_datagen.flow_from_directory(train_path,
target_size=(224, 224),
color_mode='rgb',
batch_size=BS,
class_mode='categorical',
shuffle=True,
subset = 'training')
val_data = val_datagen.flow_from_directory(train_path,
target_size=(224, 224),
color_mode='rgb',
batch_size=BS,
class_mode='categorical',
shuffle=False,
subset = 'validation')
Note: if we used a single ImageDataGenerator with subset for both splits, the same augmentation would be applied to training and validation alike. If you want to apply augmentation only to the training set (as the two-generator setup above does), you can alternatively split the folders on disk using the split-folders package, which can be installed directly using pip.
https://pypi.org/project/split-folders/
This will separate the dataset into train, val and test directory and then you can create separate generator for each of them.
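A minimal sketch of that approach (the package is installed with pip install split-folders and, in recent versions, imported as splitfolders; the paths and ratios here are placeholders):
import splitfolders
# writes train/, val/ and test/ subdirectories under 'output',
# preserving the per-class folder structure
splitfolders.ratio('path/to/dataset', output='output',
                   seed=1337, ratio=(.8, .1, .1))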
I have a PR for it. One way is to hash the filenames and do a variant assignment.
Example:
# -*- coding: utf-8 -*-
"""Train model using transfer learning."""
import os
import re
import glob
import hashlib
import argparse
import warnings
import six
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from keras.models import Model
from keras import backend as K
from keras.optimizers import SGD
from keras.layers import Dense, GlobalAveragePooling2D, Input
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing.image import (ImageDataGenerator, Iterator,
array_to_img, img_to_array, load_img)
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
RANDOM_SEED = 0
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
VALID_IMAGE_FORMATS = frozenset(['jpg', 'jpeg', 'JPG', 'JPEG'])
# we chose to train the top 2 inception blocks
BATCH_SIZE = 100
TRAINABLE_LAYERS = 172
INCEPTIONV3_BASE_LAYERS = len(InceptionV3(weights=None, include_top=False).layers)
STEPS_PER_EPOCH = 625
VALIDATION_STEPS = 100
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
FC_LAYER_SIZE = 1024
# Helper: Save the model.
checkpointer = ModelCheckpoint(
filepath='./output/checkpoints/inception.{epoch:03d}-{val_loss:.2f}.hdf5',
verbose=1,
save_best_only=True)
# Helper: Stop when we stop learning.
early_stopper = EarlyStopping(patience=10)
# Helper: TensorBoard
tensorboard = TensorBoard(log_dir='./output/')
def as_bytes(bytes_or_text, encoding='utf-8'):
"""Converts bytes or unicode to `bytes`, using utf-8 encoding for text.
# Arguments
bytes_or_text: A `bytes`, `str`, or `unicode` object.
encoding: A string indicating the charset for encoding unicode.
# Returns
A `bytes` object.
# Raises
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, six.text_type):
return bytes_or_text.encode(encoding)
elif isinstance(bytes_or_text, bytes):
return bytes_or_text
else:
raise TypeError('Expected binary or unicode string, got %r' %
(bytes_or_text,))
class CustomImageDataGenerator(ImageDataGenerator):
def flow_from_image_lists(self, image_lists,
category, image_dir,
target_size=(256, 256), color_mode='rgb',
class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None,
save_prefix='',
save_format='jpeg'):
return ImageListIterator(
image_lists, self,
category, image_dir,
target_size=target_size, color_mode=color_mode,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format)
class ImageListIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
# Arguments
image_lists: Dictionary of training images for each label.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, image_lists, image_data_generator,
category, image_dir,
target_size=(256, 256), color_mode='rgb',
class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
if data_format is None:
data_format = K.image_data_format()
classes = list(image_lists.keys())
self.category = category
self.num_class = len(classes)
self.image_lists = image_lists
self.image_dir = image_dir
how_many_files = 0
for label_name in classes:
for _ in self.image_lists[label_name][category]:
how_many_files += 1
self.samples = how_many_files
self.class2id = dict(zip(classes, range(len(classes))))
self.id2class = dict((v, k) for k, v in self.class2id.items())
self.classes = np.zeros((self.samples,), dtype='int32')
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
i = 0
self.filenames = []
for label_name in classes:
for j, _ in enumerate(self.image_lists[label_name][category]):
self.classes[i] = self.class2id[label_name]
img_path = get_image_path(self.image_lists,
label_name,
j,
self.image_dir,
self.category)
self.filenames.append(img_path)
i += 1
print("Found {} {} files".format(len(self.filenames), category))
super(ImageListIterator, self).__init__(self.samples, batch_size, shuffle,
seed)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array, current_index, current_batch_size = next(
self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
batch_x = np.zeros((current_batch_size,) + self.image_shape,
dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
img = load_img(self.filenames[j],
grayscale=grayscale,
target_size=self.target_size)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(10000),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), self.num_class),
dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def create_image_lists(image_dir, validation_pct=10):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
# Arguments
image_dir: string path to a folder containing subfolders of images.
validation_pct: integer percentage of images reserved for validation.
# Returns
dictionary of label subfolder, with images split into training
and validation sets within each label.
"""
if not os.path.isdir(image_dir):
raise ValueError("Image directory {} not found.".format(image_dir))
image_lists = {}
sub_dirs = [x[0] for x in os.walk(image_dir)]
sub_dirs_without_root = sub_dirs[1:] # first element is root directory
for sub_dir in sub_dirs_without_root:
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '{}'".format(dir_name))
for extension in VALID_IMAGE_FORMATS:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(glob.glob(file_glob))
if not file_list:
warnings.warn('No files found')
continue
if len(file_list) < 20:
warnings.warn('Folder has less than 20 images, which may cause '
'issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
warnings.warn('WARNING: Folder {} has more than {} images. Some '
'images will never be selected.'
.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# Get the hash of the file name and perform variant assignment.
hash_name = hashlib.sha1(as_bytes(base_name)).hexdigest()
hash_pct = ((int(hash_name, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if hash_pct < validation_pct:
validation_images.append(base_name)
else:
training_images.append(base_name)
image_lists[label_name] = {
'dir': dir_name,
'training': training_images,
'validation': validation_images,
}
return image_lists
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
def get_image_path(image_lists, label_name, index, image_dir, category):
    """Returns a path to an image for a label at the given index.
# Arguments
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
# Returns
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
raise ValueError('Label does not exist ', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
raise ValueError('Category does not exist ', category)
category_list = label_lists[category]
if not category_list:
raise ValueError('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_generators(image_lists, image_dir):
train_datagen = CustomImageDataGenerator(rescale=1. / 255,
horizontal_flip=True)
test_datagen = CustomImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_image_lists(
image_lists=image_lists,
category='training',
image_dir=image_dir,
target_size=(MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH),
batch_size=BATCH_SIZE,
class_mode='categorical',
seed=RANDOM_SEED)
validation_generator = test_datagen.flow_from_image_lists(
image_lists=image_lists,
category='validation',
image_dir=image_dir,
target_size=(MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH),
batch_size=BATCH_SIZE,
class_mode='categorical',
seed=RANDOM_SEED)
return train_generator, validation_generator
def get_model(num_classes, weights='imagenet'):
# create the base pre-trained model
# , input_tensor=input_tensor
base_model = InceptionV3(weights=weights, include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(FC_LAYER_SIZE, activation='relu')(x)
# and a logistic layer -- let's say we have 2 classes
predictions = Dense(num_classes, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=[base_model.input], outputs=[predictions])
return model
def get_top_layer_model(model):
"""Used to train just the top layers of the model."""
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in model.layers[:INCEPTIONV3_BASE_LAYERS]:
layer.trainable = False
for layer in model.layers[INCEPTIONV3_BASE_LAYERS:]:
layer.trainable = True
# compile the model (should be done after setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def get_mid_layer_model(model):
"""After we fine-tune the dense layers, train deeper."""
# freeze the first TRAINABLE_LAYER_INDEX layers and unfreeze the rest
for layer in model.layers[:TRAINABLE_LAYERS]:
layer.trainable = False
for layer in model.layers[TRAINABLE_LAYERS:]:
layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def train_model(model, epochs, generators, callbacks=None):
train_generator, validation_generator = generators
model.fit_generator(
train_generator,
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=validation_generator,
validation_steps=VALIDATION_STEPS,
epochs=epochs,
callbacks=callbacks)
return model
def main(image_dir, validation_pct):
sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
num_classes = len(sub_dirs) - 1
print("Number of classes found: {}".format(num_classes))
model = get_model(num_classes)
print("Using validation percent of %{}".format(validation_pct))
image_lists = create_image_lists(image_dir, validation_pct)
generators = get_generators(image_lists, image_dir)
# Get and train the top layers.
model = get_top_layer_model(model)
model = train_model(model, epochs=10, generators=generators)
# Get and train the mid layers.
model = get_mid_layer_model(model)
_ = train_model(model, epochs=100, generators=generators,
callbacks=[checkpointer, early_stopper, tensorboard])
# save model
model.save('./output/model.hdf5', overwrite=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image-dir', required=True, help='data directory')
parser.add_argument('--validation-pct', type=int, default=10, help='validation percentage')
args = parser.parse_args()
os.makedirs('./output/checkpoints/', exist_ok=True)
main(**vars(args))
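An assumed invocation of the script above (the filename is hypothetical):
python retrain.py --image-dir ./images --validation-pct 10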
If you simply want to divide the dataset for training and validation (without any augmentation, etc.)
from tensorflow.keras.applications.xception import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
ds_gen = ImageDataGenerator(
preprocessing_function=preprocess_input,
validation_split=0.2
)
train_ds = ds_gen.flow_from_directory(
"/path/to/dataset",
seed=1,
target_size=(150, 150), #adjust to your needs
batch_size=32,#adjust to your needs
class_mode='categorical',
subset='training'
)
val_ds = ds_gen.flow_from_directory(
"/path/to/dataset",
seed=1,
target_size=(150, 150),
batch_size=32,
class_mode='categorical',
subset='validation'
)
Here is the answer:
data_path = 'path/to/dir'
data_gen = ImageDataGenerator(rescale=1./255, validation_split=0.3)
train_data = data_gen.flow_from_directory(directory=data_path,target_size=img_size, batch_size=batch_size, subset='training', seed=42, class_mode='binary' )
test_data = data_gen.flow_from_directory(directory=data_path,target_size=img_size, batch_size=batch_size, subset='validation', seed=42, class_mode='binary' )
This is simple TensorFlow 2.0 code:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def image_data_generator(data_dir,
                         data_augment=False,
                         batch_size=32,
                         target_size=(100, 100),
                         color_mode='rgb',
                         class_mode='binary',
                         shuffle=True,
                         subset=None):
    if data_augment:
        datagen = ImageDataGenerator(rescale=1./255,
                                     rotation_range=20,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     shear_range=0.2,
                                     zoom_range=0.2,
                                     validation_split=0.2,  # this is the trick
                                     horizontal_flip=True)
    else:
        datagen = ImageDataGenerator(rescale=1./255,
                                     validation_split=0.2)
    # the subset argument ('training' or 'validation') activates the split above
    generator = datagen.flow_from_directory(data_dir,
                                            target_size=target_size,
                                            color_mode=color_mode,
                                            batch_size=batch_size,
                                            shuffle=shuffle,
                                            subset=subset,
                                            class_mode=class_mode)
    return generator
train_generator = image_data_generator('Your_DataBase_Path', data_augment=True,
                                       subset='training')
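For the matching validation generator (an assumed usage, not part of the original answer), call the same helper with the validation subset and without augmentation:
val_generator = image_data_generator('Your_DataBase_Path', data_augment=False,
                                     subset='validation', shuffle=False)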
If you want to use the preprocessing units of the VGG16 model and split your dataset into 70% training and 30% validation, just follow this approach (the target_size, classes, and batch_size values are left for you to fill in):
train_path = 'your dataset path'
train_batch = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input, validation_split=0.3) \
    .flow_from_directory(directory=train_path, target_size=(,), classes=['',''], batch_size=, class_mode='categorical', subset='training')
val_batch = ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input, validation_split=0.3) \
    .flow_from_directory(directory=train_path, target_size=(,), classes=['',''], batch_size=, class_mode='categorical', subset='validation')
If you are using TensorFlow 2.x, you can use the same fit() function and use the parameter validation_split also with ImageDataGenerator.
I don't know if you are still interested, but I found the following workaround. The most important function is GetTrainValidTestGeneratorFromDir; the other ones are just used by it. The basic idea is that you first divide the data in two using the ImageDataGenerator's validation_split, which gives you two iterators. You can use the second one as the test iterator. You then divide the first one further in the following way:
First use flow_from_directory with the training subset (so you can be sure that the test data are excluded). Now you can use the same generator to get two divided dataframes and then use the flow_from_dataframe function. You will get three ImageDataIterators without changing the folders.
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 10:15:18 2021
#author: Alessandro
"""
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
def ShuffleDataframe(thedataframe):
thedataframe = thedataframe.sample(n=len(thedataframe), random_state=42)
thedataframe = thedataframe.reset_index()
thedataframe.drop('index', axis='columns', inplace=True)
return(thedataframe)
def TransformGeneratorClassNumberToLabels(theGenerator, theLabelsNumbers):
labelnames = theGenerator.class_indices
labelnames = list(labelnames.keys())
theLabelsString = [labelnames[i] for i in theLabelsNumbers]
return(theLabelsString)
def GetGeneratorDataframe(theGenerator):
training_filenames = theGenerator.filenames
theLabelsNumbers = theGenerator.classes
thelabelsString = TransformGeneratorClassNumberToLabels(theGenerator,
theLabelsNumbers)
thedataframe = pd.DataFrame({'File': training_filenames,
'Label': thelabelsString})
thedataframe = ShuffleDataframe(thedataframe)
return(thedataframe)
def GetTrainValidTestGeneratorFromDir(thedirectory,
                                      input_shape=(256, 256, 3),
                                      validation_split=0.2,
                                      rescaling=1./255):
    train_datagen = ImageDataGenerator(rescale=rescaling,
                                       validation_split=validation_split)
train_and_valid_generator = train_datagen.flow_from_directory(thedirectory,
target_size=input_shape[0:2],
batch_size=20,
class_mode="categorical",
subset = 'training',
save_to_dir ='checkdir')
test_generator = train_datagen.flow_from_directory(thedirectory,
target_size=input_shape[0:2],
batch_size=20,
class_mode="categorical",
subset = 'validation')
thedataframe = GetGeneratorDataframe(train_and_valid_generator)
class_mode = "categorical"
training_generator = train_datagen.flow_from_dataframe(dataframe = thedataframe,
directory = thedirectory,
target_size=input_shape[0:2],
x_col="File",
y_col = "Label",
subset= "training",
class_mode=class_mode)
validation_generator = train_datagen.flow_from_dataframe(dataframe = thedataframe,
directory = thedirectory,
target_size=input_shape[0:2],
x_col="File",
y_col = "Label",
subset= "validation",
class_mode=class_mode)
return training_generator, validation_generator, test_generator
input_shape = (450, 450, 3)
myDir = "MyFolder"
(training_generator,
 validation_generator,
 test_generator) = GetTrainValidTestGeneratorFromDir(myDir, input_shape=input_shape)
# next part is just to verify what you got
training_dataframe = GetGeneratorDataframe(training_generator)
valid_dataframe = GetGeneratorDataframe(validation_generator)
test_dataframe = GetGeneratorDataframe(test_generator)
Note that, starting from TF 2.9, the ImageDataGenerator() has been deprecated in favour of tf.keras.utils.image_dataset_from_directory() which achieves the same functionality.
It is highly likely that the former will be removed altogether in the upcoming TF versions.
Deprecated: tf.keras.preprocessing.image.ImageDataGenerator is not
recommended for new code. Prefer loading images with
tf.keras.utils.image_dataset_from_directory and transforming the
output tf.data.Dataset with preprocessing layers. For more
information, see the tutorials for loading images and augmenting
images, as well as the preprocessing layer guide.
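For completeness, the equivalent train/validation split with the recommended replacement looks like this; a minimal sketch with assumed path, image size, and batch size:
import tensorflow as tf
train_ds = tf.keras.utils.image_dataset_from_directory(
    'path/to/dataset',
    validation_split=0.2,
    subset='training',
    seed=123,
    image_size=(150, 150),
    batch_size=32)
val_ds = tf.keras.utils.image_dataset_from_directory(
    'path/to/dataset',
    validation_split=0.2,
    subset='validation',
    seed=123,
    image_size=(150, 150),
    batch_size=32)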
In case you are looking for a train/validation split in generators for a segmentation task, you can use the following snippet:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
BATCH_SIZE = 16
val_fraction = 0.1
image_generator = ImageDataGenerator(rescale=1/255,
brightness_range=[0.75, 1.75],
validation_split=val_fraction)
mask_generator = ImageDataGenerator(validation_split=val_fraction)
train_image_generator = image_generator.flow_from_dataframe(df_img,
directory=image_dir,
x_col='image',
class_mode=None,
color_mode='rgb',
target_size=(INPUT_SIZE, INPUT_SIZE),
batch_size=BATCH_SIZE,
shuffle=True,
subset='training',
seed=1)
train_mask_generator = mask_generator.flow_from_dataframe(df_gt,
directory=gt_dir,
x_col='mask',
color_mode='grayscale',
class_mode=None,
target_size=(INPUT_SIZE, INPUT_SIZE),
batch_size=BATCH_SIZE,
shuffle=True,
subset='training',
seed=1)
validation_image_generator = image_generator.flow_from_dataframe(df_img,
directory=image_dir,
x_col='image',
class_mode=None,
color_mode='rgb',
target_size=(INPUT_SIZE, INPUT_SIZE),
batch_size=BATCH_SIZE,
subset='validation',
seed=1)
validation_mask_generator = mask_generator.flow_from_dataframe(df_gt,
directory=gt_dir,
x_col='mask',
color_mode='grayscale',
class_mode=None,
target_size=(INPUT_SIZE, INPUT_SIZE),
batch_size=BATCH_SIZE,
subset='validation',
seed=1)
train_generator = zip(train_image_generator, train_mask_generator)
validation_generator = zip(validation_image_generator, validation_mask_generator)
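One caveat with the plain zip above: model.fit in TF 2.x expects something it can iterate repeatedly, and zipping two Keras iterators directly can behave unexpectedly across epochs in some versions. A common workaround, sketched here under that assumption, is a small wrapper that yields aligned image/mask batches indefinitely (alignment relies on both generators sharing the same seed and shuffle settings):
def combine_generators(image_gen, mask_gen):
    # yield (image_batch, mask_batch) pairs forever
    while True:
        yield next(image_gen), next(mask_gen)
train_generator = combine_generators(train_image_generator, train_mask_generator)
validation_generator = combine_generators(validation_image_generator,
                                          validation_mask_generator)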