image preprocess function for image_dataset_from_directory - tensorflow

In ImageDataGenerator, I used the following function to preprocess images, passed via the 'preprocessing' keyword of .flow_from_dataframe().
However, I am now trying to use image_dataset_from_directory, which does not work with the preprocessing function, as it offers no way to embed one.
I have tried applying the preprocess_image() function after the dataset is generated by image_dataset_from_directory, via .map(), but that does not work either.
Could anyone please advise?
Many thanks,
Tony
train_Gen = dataGen.flow_from_dataframe(
    df,
    x_col='id_code',
    y_col='diagnosis',
    directory=os.path.join(data_dir, 'train_images'),
    batch_size=BATCH_SIZE,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    subset='training',
    seed=123,
    class_mode='categorical',
    preprocessing=preprocess_image,
)
def crop_image_from_gray(img, tol=7):
    """
    Applies a mask to the original image and
    returns a cropped image with 3 channels.
    :param img: A NumPy array that will be cropped
    :param tol: The tolerance used for masking
    :return: A NumPy array containing the cropped image
    """
    # If for some reason we only have two channels
    if img.ndim == 2:
        mask = img > tol
        return img[np.ix_(mask.any(1), mask.any(0))]
    # If we have a normal RGB image
    elif img.ndim == 3:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        mask = gray_img > tol
        check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0]
        if check_shape == 0:  # image is so dark that we would crop out everything,
            return img        # so return the original image
        else:
            img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))]
            img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))]
            img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))]
            img = np.stack([img1, img2, img3], axis=-1)
            return img
def preprocess_image(image, sigmaX=10):
    """
    The whole preprocessing pipeline:
    1. Read in the image
    2. Apply the mask
    3. Resize the image to the desired size
    4. Blend the image with a Gaussian-blurred copy to increase robustness
    :param image: A NumPy array that will be cropped
    :param sigmaX: Sigma value used for the Gaussian blur
    :return: A NumPy array containing the preprocessed image
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = crop_image_from_gray(image)
    image = cv2.resize(image, (IMG_WIDTH, IMG_HEIGHT))
    image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128)
    return image
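
One common workaround, sketched below, is to wrap the NumPy/OpenCV code in tf.py_function: .map() traces its argument into a TensorFlow graph, so plain NumPy/OpenCV calls fail unless they are routed through py_function. This is only a minimal sketch, assuming your directory layout works with image_dataset_from_directory (it expects one subdirectory per class, unlike flow_from_dataframe); tf_preprocess is a hypothetical wrapper name:

import os
import tensorflow as tf

def tf_preprocess(image, label):
    # tf.py_function executes the NumPy/OpenCV code eagerly inside the pipeline.
    image = tf.py_function(
        func=lambda img: preprocess_image(img.numpy().astype('uint8')),
        inp=[image],
        Tout=tf.uint8,
    )
    # Static shape information is lost across py_function, so restore it.
    # Note cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT)) yields (IMG_HEIGHT, IMG_WIDTH, 3).
    image.set_shape((IMG_HEIGHT, IMG_WIDTH, 3))
    return image, label

train_ds = tf.keras.utils.image_dataset_from_directory(
    os.path.join(data_dir, 'train_images'),
    image_size=(IMG_HEIGHT, IMG_WIDTH),
    seed=123,
)
# Unbatch so the OpenCV function sees one image at a time, then rebatch.
train_ds = train_ds.unbatch().map(tf_preprocess).batch(BATCH_SIZE)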

Related

Preprocessing layers with seed not producing the same data augmentation for images and masks

I'm trying to create a simple preprocessing augmentation layer, following this TensorFlow tutorial. I created this 'simple' example that shows the problem I'm having.
Even though I initialize the augmentation class with a seed, the operations applied to the images and to the corresponding masks are not always identical.
What am I doing wrong?
Note: tf v2.10.0
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import skimage
import rasterio as rio

def normalize(array: np.ndarray):
    """Normalize the image to give a meaningful output."""
    array_min, array_max = array.min(), array.max()
    return (array - array_min) / (array_max - array_min)

# field
im = rio.open('penguins.tif')
fields = np.zeros((1, im.shape[0], im.shape[1], 3))
fields[0, :, :, 0] = normalize(im.read(1))
fields[0, :, :, 1] = normalize(im.read(2))
fields[0, :, :, 2] = normalize(im.read(3))

# mask is a simple contour
masks = skimage.color.rgb2gray(skimage.filters.sobel(fields[0]))
masks = np.expand_dims(masks, [0, 3])
In this case the dataset is only one image; we can use this function to visualize the field and the mask.
def show(field: np.ndarray, mask: np.ndarray):
    """Show the field and corresponding mask."""
    fig = plt.figure(figsize=(8, 6))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    ax1.imshow(field[:, :, :3])
    ax2.imshow(mask, cmap='binary')
    plt.tight_layout()
    plt.show()

show(fields[0], masks[0])
Alright, now I use the example from the tutorial, which randomly flips the image and the mask horizontally.
class Augment(tf.keras.layers.Layer):
    def __init__(self, seed=42):
        super().__init__()
        # Both use the same seed, so they'll make the same random changes.
        self.augment_inputs = tf.keras.layers.RandomFlip(mode="horizontal", seed=seed)
        self.augment_labels = tf.keras.layers.RandomFlip(mode="horizontal", seed=seed)

    def call(self, inputs, labels):
        inputs = self.augment_inputs(inputs)
        labels = self.augment_labels(labels)
        return inputs, labels
Now if I run the following multiple times, I will eventually get opposite flips on the field and mask.
# Create a tf.data dataset
ds = tf.data.Dataset.from_tensor_slices((fields, masks))
ds2 = ds.map(Augment())

for f, m in ds2.take(1):
    show(f, m)
I would expect the image and its mask to be flipped the same way, since I set the seed in the Augment class as suggested in the TensorFlow tutorial.
Augmentation can be done on the image and mask concatenated along the channel axis to form a single array; the image and label can then be recovered afterwards, as shown below:
class Augment(tf.keras.layers.Layer):
    def __init__(self):
        super().__init__()
        # A single layer augments the concatenated stack, so image and mask
        # are guaranteed to receive the same random transformation.
        self.augment_inputs = tf.keras.layers.RandomRotation(0.3)

    def call(self, inputs, labels):
        output = self.augment_inputs(tf.concat([inputs, labels], -1))
        # The fields have 3 channels and the mask 1, so split at channel 3.
        inputs = output[:, :, 0:3]
        labels = output[:, :, 3:]
        return inputs, labels
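
The same trick applies to the horizontal flip from the question. A minimal sketch (AugmentFlip is a hypothetical name, adapting the answer's layer to RandomFlip):

class AugmentFlip(tf.keras.layers.Layer):
    """Flip image and mask together by concatenating along the channel axis."""
    def __init__(self):
        super().__init__()
        self.flip = tf.keras.layers.RandomFlip(mode="horizontal")

    def call(self, inputs, labels):
        # One layer, one call: the flip is either applied to the whole
        # 4-channel stack or not at all, so field and mask stay aligned.
        stacked = tf.concat([inputs, labels], axis=-1)
        flipped = self.flip(stacked)
        return flipped[..., :3], flipped[..., 3:]

ds2 = ds.map(AugmentFlip())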

Feeding tf.data Dataset with multidimensional output to Keras model

I want to feed a tf.data Dataset to a Keras model, but I get the following error:
AttributeError: 'DatasetV1Adapter' object has no attribute 'ndim'
This dataset will be used to solve a segmentation problem, so both input and output will be images (3D tensors).
The dataset is created with this code:
dataset = tf.data.Dataset.list_files(TRAIN_PATH + "*.png", shuffle=False)

def process_path(file_path):
    img = tf.io.read_file(file_path)
    img = tf.image.decode_png(img, channels=3)
    mask_path = tf.strings.regex_replace(file_path, "image", "mask")
    mask = tf.io.read_file(mask_path)
    mask = tf.image.decode_png(mask, channels=1)
    mask = tf.squeeze(mask)
    mask = tf.one_hot(tf.cast(mask, tf.int32), Num_Classes, axis=-1)
    return img, mask

dataset = dataset.map(process_path)
dataset = dataset.batch(32, drop_remainder=True)
Taking an item from the dataset shows that I get a tuple containing an input tensor and an output tensor, whose dimensions are correct:
Input: (batch-size, image height, image width, 3 channels)
Output: (batch-size, image height, image width, 4 channels)
When fitting the model I get an error:
model.fit(dataset, epochs=50)
I've solved the problem by moving to Keras 2.4.3 and TensorFlow 2.2.
Everything was right, but apparently the previous release of Keras did not handle this kind of tf.data dataset correctly.
Here's a tutorial I've found very useful on this.
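
As a quick sanity check under those versions (a minimal sketch; model is the question's segmentation model, which is not shown):

# In TF >= 2.2 / Keras >= 2.4, a batched tf.data.Dataset yielding
# (image, mask) tuples can be passed straight to fit():
model.fit(dataset, epochs=50)

# Inspecting one batch confirms the shapes the model will receive:
for img_batch, mask_batch in dataset.take(1):
    print(img_batch.shape)   # (32, image height, image width, 3)
    print(mask_batch.shape)  # (32, image height, image width, Num_Classes)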

Transform 3D Tensor to 4D

I am using the VGG16 model, which expects a 4D tensor as input. When I call model.fit(xtrain, ytrain, ...), my xtrain is a list of 3D tensors of shape [size, size, features], so in this case [224, 224, 3].
What I want is 4D tensors of shape [len(images), size, size, features].
How could I modify my code to get there?
I tried tf.expand_dims and tf.concat, but it didn't work.
# Transforming my image to a 3D Tensor
image = tf.io.read_file(image)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
image = image / 255.0
Error msg after model.fit:
Error when checking input: expected input_1 to have 4 dimensions, but got array with shape (224, 224, 3)
It looks like you are reading in only a single image and passing that. If that's the case, you can add a dimension of 1 as the first axis of the image. There are lots of ways to do that.
Using reshape (tf.reshape here, since image is a TensorFlow tensor):
image = tf.reshape(image, [1, 224, 224, 3])
Using some fancy numpy slicing notation to add an axis (personal favorite):
image = image[None, ...]
Using numpy.expand_dims() as explained in Abhijit's answer.
I imagine you want to be reading a bunch of images in though. Possibly an issue with your input process? Can you wrap your read in a loop and read multiple files? Something like:
images = []
for file in image_files:
    image = tf.io.read_file(file)
    # ...
    images.append(image)
images = np.asarray(images)
numpy.expand_dims(image, axis=0)
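
Alternatively, since the images in this pipeline are TensorFlow tensors, a batch can be built with tf.stack. A minimal sketch, where image_files is an assumed list of paths and load_image mirrors the question's per-image code:

import tensorflow as tf

def load_image(path):
    # Same per-image pipeline as in the question.
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
    return image / 255.0

# Stack N tensors of shape [IMG_SIZE, IMG_SIZE, 3] into one
# [N, IMG_SIZE, IMG_SIZE, 3] batch tensor.
xtrain = tf.stack([load_image(f) for f in image_files])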

Tensorflow Object Detection API 1-channel image

Is there any way to use pre-trained models from TensorFlow's Object Detection API, which were trained on RGB images, for single-channel grayscale images (depth)?
I tried the following approach to perform object detection on grayscale (1-channel) images using a pre-trained model (faster_rcnn_resnet101_coco_11_06_2017) in TensorFlow. It did work for me.
The model was trained on RGB images, so I just had to modify certain code in object_detection_tutorial.ipynb, available in the TensorFlow repo.
First Change:
Note that the existing code in the ipynb was written for 3-channel images, so change the load_image_into_numpy_array function as shown.
From
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
To
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    channel_dict = {'L': 1, 'RGB': 3}  # 'L' for grayscale, 'RGB' for 3-channel images
    return np.array(image.getdata()).reshape(
        (im_height, im_width, channel_dict[image.mode])).astype(np.uint8)
Second Change: Grayscale images have data in only 1 channel. To perform object detection we need 3 channels (the inference code was written for 3 channels).
This can be achieved in two ways.
a) Duplicate the single-channel data into the two other channels.
b) Fill the other two channels with zeros.
Both of them will work; I used the first method, as in the sketch below.
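
A minimal standalone illustration of the two options on a (height, width, 1) NumPy array (illustrative only; the actual notebook change follows):

import numpy as np

gray = np.random.randint(0, 256, size=(480, 640, 1), dtype=np.uint8)

# a) Duplicate the single channel into all three channels.
rgb_dup = np.broadcast_to(gray, gray.shape[:2] + (3,)).copy()

# b) Keep the data in one channel and zero-fill the other two.
zeros = np.zeros(gray.shape[:2] + (2,), dtype=gray.dtype)
rgb_zero = np.concatenate([gray, zeros], axis=-1)

assert rgb_dup.shape == rgb_zero.shape == (480, 640, 3)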
In the ipynb, go to the section where you read the images and convert them into NumPy arrays (the for loop at the end of the ipynb).
Change the code from:
for image_path in TEST_IMAGE_PATHS:
    image = Image.open(image_path)
    # The array-based representation of the image will be used later in order
    # to prepare the result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
To this:
for image_path in TEST_IMAGE_PATHS:
    image = Image.open(image_path)
    # The array-based representation of the image will be used later in order
    # to prepare the result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    if image_np.shape[2] != 3:
        image_np = np.broadcast_to(image_np, (image_np.shape[0], image_np.shape[1], 3)).copy()  # duplicating the content
        ## Adding zeros to the other channels:
        ## this adds red-colored stuff to the background -- not recommended
        # z = np.zeros(image_np.shape[:-1] + (2,), dtype=image_np.dtype)
        # image_np = np.concatenate((image_np, z), axis=-1)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
That's it. Run the file and you should see the results.

Is the input of `tf.image.resize_images` must have static shape?

I run the code below and it raises a ValueError: 'images' contains no shape. Therefore I have to add the commented-out line to set a static shape, but img_raw may have different shapes, and that line defeats the purpose of tf.image.resize_images.
I just want to turn images with different shapes into [227, 227, 3]. How should I do that?
def tf_read(file_queue):
    reader = tf.WholeFileReader()
    file_name, content = reader.read(file_queue)
    img_raw = tf.image.decode_image(content, 3)
    # img_raw.set_shape([227, 227, 3])
    img_resized = tf.image.resize_images(img_raw, [227, 227])
    img_shape = tf.shape(img_resized)
    return file_name, img_resized, img_shape
The issue here actually comes from the fact that tf.image.decode_image doesn't return the shape of the image. This was explained in these two GitHub issues: issue1, issue2.
The problem comes from the fact that tf.image.decode_image also handles .gif, which returns a 4D tensor, whereas .jpg and .png return 3D images. Therefore, the correct static shape cannot be inferred.
The solution is simply to use tf.image.decode_jpeg or tf.image.decode_png (both work the same and can be used on both .png and .jpg images).
def _decode_image(filename):
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_jpeg(image_string, channels=3)
    image = tf.cast(image_decoded, tf.float32)
    image_resized = tf.image.resize_images(image, [224, 224])
    return image_resized
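
_decode_image can then, for instance, be mapped over a tf.data dataset of file names (a minimal sketch in the same TF 1.x API; the file list is an assumption):

filenames = tf.constant(['./dog1.jpg', './dog2.jpg'])
dataset = tf.data.Dataset.from_tensor_slices(filenames)
# Every element now has the static shape (224, 224, 3).
dataset = dataset.map(_decode_image)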
No, tf.image.resize_images can handle dynamic shapes:
file_queue = tf.train.string_input_producer(['./dog1.jpg'])
# shape of dog1.jpg is (720, 720)
reader = tf.WholeFileReader()
file_name, content = reader.read(file_queue)
img_raw = tf.image.decode_jpeg(content, 3)  # size (?, ?, 3) <= dynamic h and w
# img_raw.set_shape([227,227,3])
img_resized = tf.image.resize_images(img_raw, [227, 227])
img_shape = tf.shape(img_resized)

with tf.Session() as sess:
    tf.train.start_queue_runners(sess=sess)  # start the file-queue threads
    print img_shape.eval()  # [227, 227, 3]
BTW, I am using tf v0.12, and there is no function called tf.image.decode_image, but I don't think it is important
Of course you can use a tensor object as the size input for tf.image.resize_images.
So, by saying "turn images with different shapes into [227, 227, 3]", I suppose you don't want to lose their aspect ratio, right? To achieve this, you have to rescale the input image first, then pad the rest with zeros.
It should be noted, though, that you should consider performing image distortion and standardization before padding.
# Rescale so that one side of the image fits the corresponding side of the box,
# then pad the rest with zeros.
# target height is 227
# target width is 227
image = a_image_tensor_you_read
shape = tf.shape(image)
img_h = shape[0]
img_w = shape[1]
box_h = tf.convert_to_tensor(target_height)
box_w = tf.convert_to_tensor(target_width)
img_ratio = tf.cast(tf.divide(img_h, img_w), tf.float32)
aim_ratio = tf.cast(tf.divide(box_h, box_w), tf.float32)
# If the image is relatively taller than the box, height is the limiting side;
# otherwise width is. Scale the free side by box_side / img_side.
aim_h, aim_w = tf.cond(tf.greater(img_ratio, aim_ratio),
                       lambda: (box_h,
                                tf.cast(tf.cast(tf.divide(box_h, img_h), tf.float32)
                                        * tf.cast(img_w, tf.float32), tf.int32)),
                       lambda: (tf.cast(tf.cast(tf.divide(box_w, img_w), tf.float32)
                                        * tf.cast(img_h, tf.float32), tf.int32),
                                box_w))
image_resize = tf.image.resize_images(image, tf.cast([aim_h, aim_w], tf.int32), align_corners=True)

# Perform image standardization and distortion here
image_standardized_distorted = blablabla

image_padded = tf.image.resize_image_with_crop_or_pad(image_standardized_distorted, box_h, box_w)
return image_padded
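
Note that in current TensorFlow this whole dance is available as a single op, tf.image.resize_with_pad, which resizes while preserving the aspect ratio and pads the remainder to the target size. A minimal sketch in the modern API (not the TF 0.x/1.x API used above):

import tensorflow as tf

image = tf.io.read_file('./dog1.jpg')
image = tf.image.decode_jpeg(image, channels=3)
# Fit inside 227x227 while preserving aspect ratio, then zero-pad.
image_padded = tf.image.resize_with_pad(image, target_height=227, target_width=227)
print(image_padded.shape)  # (227, 227, 3)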