I am trying to create a preprocessing function so that training_dataset can be fed directly into a Keras sequential neural network. The preprocessing function should return features and labels:
def preprocessing_function(data):
features = ...
labels = ...
return features, labels
dataset, info = tfds.load(name='cats_vs_dogs', split=tfds.Split.TRAIN, with_info=True)
training_dataset = dataset.map(preprocessing_function)
How should I write the preprocessing_function? I spent several hours researching and trying to make it happen, but to no avail. Hoping someone can assist.
Here are two functions for preprocessing. The first one will be applied to both the training and validation data to normalize it and resize it to the input size the network expects. The second function, augmentation, will be applied to the training set only. The kind of augmentation you want depends on your dataset and application, but I've provided this as an example.
#Fetching, pre-processing & preparing data-pipeline
def preprocess(ds):
    x = tf.image.resize_with_pad(ds['image'], IMG_SIZE_H, IMG_SIZE_W)  # resize_with_pad takes (height, width)
x = tf.cast(x, tf.float32)
x = (x-MEAN)/(VARIANCE)
y = tf.one_hot(ds['label'], NUM_CLASSES)
return x, y
def augmentation(image,label):
image = tf.image.random_flip_left_right(image)
image = tf.image.resize_with_crop_or_pad(image, IMG_W+4, IMG_W+4) # zero pad each side with 4 pixels
    image = tf.image.random_crop(image, size=[BATCH_SIZE, IMG_W, IMG_H, 3]) # random crop back to IMG_W x IMG_H (runs on a full batch)
return image, label
and to load training and validation datasets, do something like this:
def get_dataset(dataset_name, shuffle_buff_size=1024, batch_size=BATCH_SIZE, augmented=True):
train, info_train = tfds.load(dataset_name, split='train[:80%]', with_info=True)
val, info_val = tfds.load(dataset_name, split='train[80%:]', with_info=True)
    TRAIN_SIZE = int(info_train.splits['train'].num_examples * 0.8)
    VAL_SIZE = int(info_train.splits['train'].num_examples * 0.2)
train = train.map(preprocess).cache().repeat().shuffle(shuffle_buff_size).batch(batch_size)
    if augmented:
train = train.map(augmentation)
train = train.prefetch(tf.data.experimental.AUTOTUNE)
val = val.map(preprocess).cache().repeat().batch(batch_size)
val = val.prefetch(tf.data.experimental.AUTOTUNE)
return train, info_train, val, info_val, TRAIN_SIZE, VAL_SIZE
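To put it all together, here is a hedged usage sketch: the constants below (IMG_SIZE_W, IMG_SIZE_H, IMG_W, IMG_H, MEAN, VARIANCE, NUM_CLASSES, BATCH_SIZE) are placeholders you'd pick for your own setup, and model is assumed to be an already-built and compiled Keras model. Because both datasets repeat(), Keras needs steps_per_epoch and validation_steps:
# Placeholder constants: pick values that suit your network and data
IMG_SIZE_W = IMG_SIZE_H = IMG_W = IMG_H = 224
MEAN, VARIANCE = 127.5, 127.5  # example values giving rough [-1, 1] scaling
NUM_CLASSES = 2                # cats vs. dogs
BATCH_SIZE = 32

train, info_train, val, info_val, TRAIN_SIZE, VAL_SIZE = get_dataset('cats_vs_dogs')

# The datasets repeat forever, so tell Keras how many steps form one epoch
model.fit(train,
          epochs=10,
          steps_per_epoch=TRAIN_SIZE // BATCH_SIZE,
          validation_data=val,
          validation_steps=VAL_SIZE // BATCH_SIZE)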
I am using transfer learning from MobileNetV3 Small to predict 5 different points on an image. I am doing this as a regression task.
For both models:
Setting the last 50 layers trainable and adding the same fully connected layers to the end.
Learning rate 3e-2
Batch size 32
Adam optimizer with the same betas
100 epochs
The inputs consist of RGB unscaled images
PyTorch
Model
def _init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def get_mob_v3_small():
model = torchvision.models.mobilenet_v3_small(pretrained=True)
    children_list = get_children(model)  # get_children: helper (not shown) that flattens the model into a list of leaf modules
for c in children_list[:-50]:
for p in c.parameters():
p.requires_grad = False
return model
class TransferMobileNetV3_v2(nn.Module):
def __init__(self,
num_keypoints: int = 5):
super(TransferMobileNetV3_v2, self).__init__()
self.classifier_neurons = num_keypoints*2
self.base_model = get_mob_v3_small()
self.base_model.classifier = nn.Sequential(
nn.Linear(in_features=1024, out_features=1024),
nn.ReLU(),
nn.Linear(in_features=1024, out_features=512),
nn.ReLU(),
nn.Linear(in_features=512, out_features=self.classifier_neurons)
)
self.base_model.apply(_init_weights)
def forward(self, x):
out = self.base_model(x)
return out
Training Script
def train(net, trainloader, testloader, train_loss_fn, optimizer, scaler, args):
len_dataloader = len(trainloader)
for epoch in range(1, args.epochs+1):
net.train()
for batch_idx, sample in enumerate(trainloader):
inputs, labels = sample
inputs, labels = inputs.to(args.device), labels.to(args.device)
optimizer.zero_grad()
with torch.cuda.amp.autocast(args.use_amp):
prediction = net(inputs)
loss = train_loss_fn(prediction, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
def main():
args = make_args_parser()
args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
    net = TransferMobileNetV3_v2().to(args.device)  # instantiate the model defined above
    loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=3e-2,
betas=(0.9, 0.999))
scaler = torch.cuda.amp.GradScaler(enabled=args.use_amp)
    train(net, train_loader, test_loader, loss_fn, optimizer, scaler, args)  # dataloaders are defined elsewhere
TensorFlow
Model
base_model = tf.keras.applications.MobileNetV3Small(weights='imagenet',
input_shape=(224,224,3))
x_in = base_model.layers[-6].output
x = Dense(units=1024, activation="relu")(x_in)
x = Dense(units=512, activation="relu")(x)
x = Dense(units=10, activation="linear")(x)
model = Model(inputs=base_model.input, outputs=x)
for layer in model.layers[:-50]:
layer.trainable=False
Training Script
model.compile(loss = "mse",
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-2))
history = model.fit(input_numpy, output_numpy,
verbose=1,
batch_size=32, epochs=100,validation_split = 0.2)
Results
The PyTorch model predicts one single point around the center for all 5 different points.
The TensorFlow model predicts the points quite well and is quite accurate.
The loss of the PyTorch model is much higher than that of the TensorFlow model.
Please do let me know what is going wrong, as I am trying my best to shift to PyTorch for this work and I need this model to give me similar/identical results.
Note: I also noticed that the MobileNetV3 Small architecture seems to differ between PyTorch and TensorFlow. I do not know if I am interpreting it wrong, but I'm putting it here just in case.
I am trying to implement a model in keras that will have multiple inputs:
image (200x200)
some numbers (1x50)
three 1d signals (1x50000, 2x100000)
To feed that model, I want to write a generator to use with tf.data.Dataset.from_generator. From the docs of from_generator, it's not clear to me how I should provide its parameters output_types and output_shapes. Can anyone help me with this?
I had a similar issue, and it took me many tries to get the structure right for those inputs. Here's an example of a network with 3 inputs and 2 outputs, complete up to the .fit call.
The following works in tensorflow 2.1.0
import tensorflow as tf
import numpy as np
def generator(N=10):
"""
Returns tuple of (inputs,outputs) where
    inputs = (inp1,inp2,inp3)
outputs = (out1,out2)
"""
dt=np.float32
    for i in range(N):  # each yield produces one full batch of N samples
inputs = (np.random.rand(N,3,3,1).astype(dt),
np.random.rand(N,3,3,1).astype(dt),
np.random.rand(N,3,3,1).astype(dt))
outputs = (np.random.rand(N,3,3,1).astype(dt),
np.random.rand(N,3,3,1).astype(dt))
yield inputs,outputs
# Create dataset from generator
types = ( (tf.float32,tf.float32,tf.float32),
(tf.float32,tf.float32) )
shapes = (([None,3,3,1],[None,3,3,1],[None,3,3,1]),
([None,3,3,1],[None,3,3,1]))
data = tf.data.Dataset.from_generator(generator,
output_types=types,
output_shapes=shapes
)
# Define a model
inp1 = tf.keras.Input(shape=(3,3,1),name='inp1')
inp2 = tf.keras.Input(shape=(3,3,1),name='inp2')
inp3 = tf.keras.Input(shape=(3,3,1),name='inp3')
out1 = tf.keras.layers.Conv2D(1,kernel_size=3,padding='same')(inp1)
out2 = tf.keras.layers.Conv2D(1,kernel_size=3,padding='same')(inp2)
model = tf.keras.Model(inputs=[inp1,inp2,inp3],outputs=[out1,out2])  # inp3 is accepted but unused in this toy example
model.compile(loss=['mse','mse'])
# Train
model.fit(data)
So assuming you have a generator that is similar to this mock:
def dummy_generator():
number_of_records = 100
    for i in range(number_of_records):
an_image = tf.random.uniform((200,200,3))
some_numbers = tf.random.uniform((50,))
signal1 = tf.random.uniform((50000,))
signal2 = tf.random.uniform((100000,))
signal3 = tf.random.uniform((100000,))
yield an_image, some_numbers, signal1, signal2, signal3
each record is of datatype float32 so the output types are easy:
out_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32)
for the output shapes we just list the shapes in the same order:
out_shapes = ((200,200,3), (50,), (50000,), (100000,), (100000,))
so now we can just call from_generator:
ds = tf.data.Dataset.from_generator(dummy_generator,
output_types=out_types,
output_shapes=out_shapes)
model.fit(ds, epochs=EPOCHS)
Note that for fit to work on the dataset like this, your generator must yield (inputs, labels) tuples, with the output types and shapes nested the same way as in the first example, and your model must have n matching input layers (3 in the example above, 5 in your case).
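Putting the pieces together for your five inputs, here is a rough sketch; the scalar label and the batch size are assumptions for illustration, since the question doesn't say what the targets look like:
import tensorflow as tf

def record_generator():
    # yields one (inputs, label) record at a time (not pre-batched)
    for _ in range(100):
        inputs = (tf.random.uniform((200, 200, 3)),   # image
                  tf.random.uniform((50,)),           # some numbers
                  tf.random.uniform((50000,)),        # signal 1
                  tf.random.uniform((100000,)),       # signal 2
                  tf.random.uniform((100000,)))       # signal 3
        label = tf.random.uniform(())                 # hypothetical scalar target
        yield inputs, label

# types and shapes mirror the yielded (inputs, label) nesting
out_types = ((tf.float32, tf.float32, tf.float32, tf.float32, tf.float32), tf.float32)
out_shapes = (((200, 200, 3), (50,), (50000,), (100000,), (100000,)), ())

ds = tf.data.Dataset.from_generator(record_generator,
                                    output_types=out_types,
                                    output_shapes=out_shapes).batch(8)
# model.fit(ds, epochs=EPOCHS)  # the model needs five matching Input layers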
I would like to apply simple data augmentation (multiplication of the input vector by a random scalar) to a fully connected neural network implemented in Keras. Keras has nice functionality for image augmentation, but trying to use this seemed awkward and slow for my input (1-tensors), whose training data set fits in my computer's memory.
Instead, I imagined that I could achieve this using a Lambda layer, e.g. something like this:
x = Input(shape=(10,))
y = x
y = Lambda(lambda z: random.uniform(0.5,1.0)*z)(y)
y = Dense(units=5, activation='relu')(y)
y = Dense(units=1, activation='sigmoid')(y)
model = Model(x, y)
My question concerns when this random number will be generated. Will this fix a single random number for:
the entire training process?
each batch?
each training data point?
Using this will create a constant that will not change at all, because random.uniform is not a Keras function: it is evaluated once in Python when the graph is built, so the operation is defined as constant * tensor with a fixed factor.
You need random functions "from Keras" or "from TensorFlow". For instance, you can take K.random_uniform((1,), 0.5, 1.).
This value changes on every batch (on every execution of the graph, to be precise). You can test it by training this code for a lot of epochs and watching the loss change.
from keras.layers import *
from keras.models import Model
from keras import backend as K
import numpy as np
ins = Input((1,))
outs = Lambda(lambda x: K.random_uniform((1,))*x)(ins)
model = Model(ins,outs)
print(model.predict(np.ones((1,1))))
print(model.predict(np.ones((1,1))))
print(model.predict(np.ones((1,1))))
model.compile('adam','mae')
model.fit(np.ones((100000,1)), np.ones((100000,1)))
If you want it to change for each training sample, then use a fixed batch size and generate a tensor with one random number per sample, shaped so it broadcasts across the features: K.random_uniform((batch_size, 1), .5, 1.).
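As a sketch, reusing ins and outs from the demo above and assuming 2D inputs of shape (batch_size, features); the extra axis of size 1 makes the random factor broadcast across the feature dimension:
batch_size = 32  # must match the batch size you pass to fit()
outs = Lambda(lambda x: K.random_uniform((batch_size, 1), 0.5, 1.) * x)(ins)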
You should probably get better performance if you do it in your own generator and model.fit_generator(), though:
import random
import numpy as np
import keras

class MyGenerator(keras.utils.Sequence):
def __init__(self, inputs, outputs, batchSize, minRand, maxRand):
self.inputs = inputs
self.outputs = outputs
self.batchSize = batchSize
self.minRand = minRand
self.maxRand = maxRand
#if you want shuffling
def on_epoch_end(self):
indices = np.array(range(len(self.inputs)))
np.random.shuffle(indices)
self.inputs = self.inputs[indices]
self.outputs = self.outputs[indices]
def __len__(self):
leng,rem = divmod(len(self.inputs), self.batchSize)
return (leng + (1 if rem > 0 else 0))
def __getitem__(self,i):
start = i*self.batchSize
end = start + self.batchSize
        # one random factor per batch; for an independent factor per sample,
        # use np.random.uniform(self.minRand, self.maxRand, size=(end-start, 1)) instead
        x = self.inputs[start:end] * random.uniform(self.minRand, self.maxRand)
y = self.outputs[start:end]
return x,y
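A hypothetical usage sketch, where x_train and y_train stand in for your in-memory training arrays:
gen = MyGenerator(x_train, y_train, batchSize=32, minRand=0.5, maxRand=1.0)
model.fit_generator(gen, epochs=10)  # newer tf.keras versions also accept model.fit(gen, ...)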
The following code is copied from a GAN MNIST tutorial on Udemy. When I run it, it converges toward creating images with a large white area in the center that is black at the sides (picture an empty filled circle against a black background). I have no idea what the problem is, as I have only done what the tutorial told me to do word for word. The only difference is that I extract the MNIST data differently. Has something about TensorFlow changed recently?
import tensorflow as tf
import numpy as np
import gzip
from PIL import Image
import os.path
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(28 * 28 * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
#data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, 28, 28, 1)
return data
fname_img_train = extract_data('../Data/MNIST/train-images-idx3-ubyte.gz', 60000)
def generator(z, reuse=None):
with tf.variable_scope('gen',reuse=reuse):
hidden1 = tf.layers.dense(inputs=z,units=128)
alpha = 0.01
hidden1=tf.maximum(alpha*hidden1,hidden1)
hidden2=tf.layers.dense(inputs=hidden1,units=128)
hidden2 = tf.maximum(alpha*hidden2,hidden2)
output=tf.layers.dense(hidden2,units=784, activation=tf.nn.tanh)
return output
def discriminator(X, reuse=None):
with tf.variable_scope('dis',reuse=reuse):
hidden1=tf.layers.dense(inputs=X,units=128)
alpha=0.01
hidden1=tf.maximum(alpha*hidden1,hidden1)
hidden2=tf.layers.dense(inputs=hidden1,units=128)
hidden2=tf.maximum(alpha*hidden2,hidden2)
logits=tf.layers.dense(hidden2,units=1)
output=tf.sigmoid(logits)
return output, logits
real_images=tf.placeholder(tf.float32,shape=[None,784])
z=tf.placeholder(tf.float32,shape=[None,100])
G = generator(z)
D_output_real, D_logits_real = discriminator(real_images)
D_output_fake, D_logits_fake = discriminator(G,reuse=True)
def loss_func(logits_in,labels_in):
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits_in,labels=labels_in))
D_real_loss = loss_func(D_logits_real,tf.ones_like(D_logits_real)*0.9)
D_fake_loss = loss_func(D_logits_fake,tf.zeros_like(D_logits_real))
D_loss = D_real_loss + D_fake_loss
G_loss = loss_func(D_logits_fake,tf.ones_like(D_logits_fake))
learning_rate = 0.001
tvars = tf.trainable_variables()
d_vars= [var for var in tvars if 'dis' in var.name]
g_vars = [var for var in tvars if 'gen' in var.name]
D_trainer = tf.train.AdamOptimizer(learning_rate).minimize(D_loss,var_list=d_vars)
G_trainer = tf.train.AdamOptimizer(learning_rate).minimize(G_loss,var_list=g_vars)
batch_size=100
epochs=30
set_size=60000
init = tf.global_variables_initializer()
samples=[]
def create_image(img, name):
img = np.reshape(img, (28, 28))
print("before")
print(img)
img = (np.multiply(np.divide(np.add(img, 1.0), 2.0),255.0).astype(np.int16))
print("after")
print(img)
im = Image.fromarray(img.astype('uint8'))
im.save(name)
with tf.Session() as sess:
sess.run(init)
for epoch in range(epochs):
np.random.shuffle(fname_img_train)
num_batches=int(set_size/batch_size)
for i in range(num_batches):
batch = fname_img_train[i*batch_size:((i+1)*batch_size)]
batch_images = np.reshape(batch, (batch_size,784))
batch_images = batch_images*2.0-1.0
batch_z = np.random.uniform(-1,1,size=(batch_size,100))
_ = sess.run(D_trainer, feed_dict={real_images:batch_images,z:batch_z})
_ = sess.run(G_trainer,feed_dict={z:batch_z})
print("ON EPOCH {}".format(epoch))
sample_z = np.random.uniform(-1,1,size=(batch_size,100))
gen_sample = sess.run(G,feed_dict={z:sample_z})
create_image(gen_sample[0], "img"+str(epoch)+".png")
As far as I can see, you are not normalizing the training data. Instead of using your extract_data() function, it is much easier to do the following:
from tensorflow.keras.datasets.mnist import load_data

(train_data, train_labels), _ = load_data()
train_data = train_data.astype(np.float32) / 255.
Besides, people usually sample twice from the latent space at each training step: once for the discriminator update and once for the generator update. Still, it did not seem to make a difference here.
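For reference, sampling separately is a small change inside the training loop of your code:
# draw a fresh latent batch for each network instead of reusing batch_z
batch_z_d = np.random.uniform(-1, 1, size=(batch_size, 100))
_ = sess.run(D_trainer, feed_dict={real_images: batch_images, z: batch_z_d})
batch_z_g = np.random.uniform(-1, 1, size=(batch_size, 100))
_ = sess.run(G_trainer, feed_dict={z: batch_z_g})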
After implementing these changes, using a batch size of 200 and training for 100 epochs, I got the following result: gen_sample. The result is pretty bad, but it is definitely better than an "empty filled circle against a black background".
Note that the generator and discriminator architectures you are using are very simple. From my experience, stacking some convolutional layers gives much better results. In addition, rather than hand-rolling leaky ReLU with tf.maximum(), you can use the built-in tf.nn.leaky_relu(), as sketched below.
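As an illustration only (the layer sizes are arbitrary, not tuned), a convolutional generator in the same tf.layers style could look like this:
def conv_generator(z, reuse=None):
    with tf.variable_scope('gen', reuse=reuse):
        x = tf.layers.dense(z, units=7*7*64)
        x = tf.nn.leaky_relu(x)
        x = tf.reshape(x, [-1, 7, 7, 64])
        # upsample 7x7 -> 14x14 -> 28x28
        x = tf.layers.conv2d_transpose(x, filters=32, kernel_size=5, strides=2,
                                       padding='same', activation=tf.nn.leaky_relu)
        x = tf.layers.conv2d_transpose(x, filters=1, kernel_size=5, strides=2,
                                       padding='same', activation=tf.nn.tanh)
        return tf.reshape(x, [-1, 784])  # flatten to match the 784-dim discriminator input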
Finally, instead of your create_image() function, I used the following:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

def plot_mnist(samples, name):
fig = plt.figure(figsize=(6,6))
gs = gridspec.GridSpec(6,6)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28,28), cmap='Greys_r')
plt.savefig('{}.png'.format(name))
plt.close()
There are many different ways of improving the quality of a GAN model, and the majority of those techniques can be easily found online. Please let me know if you have any specific question.
I use Estimator and I train the model in a loop to feed it data. Because I call fit() with steps=1, every step is a final step, and a checkpoint is saved at every final step too. I want to avoid saving a checkpoint on every iteration to increase the training speed.
I cannot find any information on how to do this. Do you have any ideas/suggestions/solutions?
classifier = Estimator(
model_fn=cnn_model_fn,
model_dir="./temp_model_Adam",
config=tf.contrib.learn.RunConfig(
save_checkpoints_secs=None,
save_checkpoints_steps=100,
save_summary_steps=None
)
)
# Train the model
for e in range(0, 10):
numbers = np.arange(10000)
np.random.shuffle(numbers)
for step in range(0, 2000):
classifier.fit(
input_fn=lambda: read_images_for_training_as_batch(step, path, 5, numbers),
steps=1
)
Nowadays the API has changed a bit, but from what I can see you were using the fit method (currently train) incorrectly: you should pass steps=2000 and have your input function return an iterator over your dataset. Today you have tf.estimator.inputs.numpy_input_fn at your disposal, which can help when you have a small dataset; otherwise you have to use the tf.data.Dataset API.
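For a small in-memory dataset, a numpy_input_fn sketch might look like this (x_train, y_train and the feature key 'x' are placeholders that must match what your model_fn expects):
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': x_train},   # feature dict, keyed the way your model_fn expects
    y=y_train,
    batch_size=64,
    num_epochs=None,    # repeat indefinitely; `steps` bounds the training
    shuffle=True)
classifier.train(input_fn=train_input_fn, steps=2000)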
For larger datasets, something like this (it loads .wav files):
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.python.ops import io_ops
# ...
def input_fn(num_epochs, batch_size, shuffle=False, mode='training'):
def input_fn_bound():
def _read_file(fn, label):
return io_ops.read_file(fn), label
def _decode(data, label):
pcm = contrib_audio.decode_wav(data,
desired_channels=1,
                                           desired_samples=desired_samples)  # desired_samples is defined elsewhere
return pcm.audio, label
filenames = get_files(mode)
classes = get_classes(mode)
labels = {'class': np.array(classes)}
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
if shuffle:
            dataset = dataset.shuffle(buffer_size=len(filenames))  # shuffle over the full file list
        dataset = dataset.map(_read_file, num_parallel_calls=num_map_threads)  # num_map_threads: defined elsewhere
        dataset = dataset.map(_decode, num_parallel_calls=num_map_threads)
dataset = dataset.map(lambda wav, label: ({'wav': wav}, label))
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(2) # To load next batch while the first one is being processed on GPU
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
return features, labels
return input_fn_bound
# ....
estimator.train(input_fn=input_fn(
    num_epochs=None,
batch_size=64,
shuffle=True,
mode='training'),
steps=10000)
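With a single train() call like this, how often checkpoints are written is controlled by the RunConfig rather than by your loop. As a sketch, using the tf.estimator counterpart of the config from your question:
classifier = tf.estimator.Estimator(
    model_fn=cnn_model_fn,
    model_dir="./temp_model_Adam",
    config=tf.estimator.RunConfig(save_checkpoints_steps=1000))  # checkpoint every 1000 steps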