model.fit_generator() fails with use_multiprocessing=True - tensorflow

In the code example below, I can train the model only when NOT using multiprocessing.
My generator is straight from the tensorflow.keras.utils.Sequence documentation at https://www.tensorflow.org/api_docs/python/tf/keras/utils/Sequence
Any idea how to fix the generator to allow multiprocessing?
Running on Win 10, tensorflow 1.13.1, python 3.6.8
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.utils import Sequence

# Generator
class DataGenerator(Sequence):
    def __init__(self, dim, batch_size, n_channels):
        self.dim = dim
        self.batch_size = batch_size
        self.n_channels = n_channels

    def __len__(self):
        return 100

    def __getitem__(self, idx):
        X = np.random.randn(self.batch_size, self.dim, self.n_channels)
        Y = np.random.randn(self.batch_size, self.dim, 1)
        return X, Y

dim = 32
batch_size = 64
n_channels = 3

# Generators
training_generator = DataGenerator(dim, batch_size, n_channels)
validation_generator = DataGenerator(dim, batch_size, n_channels)

# Model
model = Sequential()
model.add(layers.GRU(128, return_sequences=True,
                     batch_input_shape=[None, training_generator.dim, training_generator.n_channels]))
model.add(layers.Dense(1))
model.compile(loss='mse', optimizer='adam')

# This training procedure runs
model.fit_generator(generator=training_generator,
                    epochs=2,
                    steps_per_epoch=100,
                    max_queue_size=32,
                    validation_data=validation_generator,
                    validation_steps=20,
                    verbose=1)

# This training procedure fails (the only change is that I added the multiprocessing options)
model.fit_generator(generator=training_generator,
                    epochs=2,
                    steps_per_epoch=100,
                    max_queue_size=32,
                    validation_data=validation_generator,
                    validation_steps=20,
                    verbose=1,
                    use_multiprocessing=True,
                    workers=4)
I expected the second fit_generator() call to train the model like the first one. Instead, I get no output, not even an error message.

I tried your code on an Ubuntu 18.04.2 LTS machine with Python 3.6.8 and TensorFlow 1.13.1. It works in both cases, as the log below shows:
2019-07-13 12:56:17.003119: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally
100/100 [==============================] - 3s 27ms/step - loss: 0.9987
100/100 [==============================] - 10s 103ms/step - loss: 0.9973 - val_loss: 0.9987
Epoch 2/2
100/100 [==============================] - 3s 26ms/step - loss: 0.9955
100/100 [==============================] - 8s 83ms/step - loss: 1.0028 - val_loss: 0.9955
Multiprocessing=True ......
Epoch 1/2
100/100 [==============================] - 3s 32ms/step - loss: 0.9952
100/100 [==============================] - 9s 89ms/step - loss: 0.9962 - val_loss: 0.9952
Epoch 2/2
100/100 [==============================] - 3s 28ms/step - loss: 0.9967
100/100 [==============================] - 9s 86ms/step - loss: 0.9968 - val_loss: 0.9967
My suggestion is to first try CPU-only mode, by putting BOTH the model definition and the fit_generator call under "with tf.device('/cpu:0'):". If that works, it is a GPU-related issue, such as a driver problem or a TensorFlow build without proper GPU support. Most likely, the issue is caused by the GPU hanging.
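For reference, a minimal sketch of that CPU-only check, reusing the imports, generators and hyperparameters from the question code above (the only change is the placement under tf.device):

import tensorflow as tf

# Build and fit the model pinned to the CPU. If this runs while the default
# (GPU) variant hangs, the problem is in the GPU setup (driver/CUDA/cuDNN).
with tf.device('/cpu:0'):
    model = Sequential()
    model.add(layers.GRU(128, return_sequences=True,
                         batch_input_shape=[None, dim, n_channels]))
    model.add(layers.Dense(1))
    model.compile(loss='mse', optimizer='adam')

    model.fit_generator(generator=training_generator,
                        epochs=2,
                        steps_per_epoch=100,
                        validation_data=validation_generator,
                        validation_steps=20,
                        use_multiprocessing=True,
                        workers=4)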

Related

Classification with PyTorch is much slower than Tensorflow: 42min vs. 11min

I have been a TensorFlow user and am starting to use PyTorch. As a trial, I implemented simple classification tasks with both libraries.
However, PyTorch is much slower than TensorFlow: PyTorch takes 42 min while TensorFlow takes 11 min. I referred to the official PyTorch tutorial and made only minor changes to it.
Could anyone share some advice on this problem?
Here is a summary of what I tried.
environment: Colab Pro+
dataset: Cifar10
classifier: VGG16
optimizer: Adam
loss: crossentropy
batch size: 32
PyTorch
Code:
import torch, torchvision
from torch import nn
from torchvision import transforms, models
from tqdm import tqdm
import time, copy

trans = transforms.Compose([transforms.Resize((224, 224)),
                            transforms.ToTensor(),])

data = {phase: torchvision.datasets.CIFAR10('./', train=(phase == 'train'), transform=trans, download=True)
        for phase in ['train', 'test']}
dataloaders = {phase: torch.utils.data.DataLoader(data[phase], batch_size=32, shuffle=True)
               for phase in ['train', 'test']}

def train_model(model, criterion, optimizer, dataloaders, device, num_epochs=5):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in tqdm(iter(dataloaders[phase])):
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase])
            epoch_acc = running_corrects.double() / len(dataloaders[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = models.vgg16(pretrained=False)
model = model.to(device)
model = train_model(model=model,
                    criterion=nn.CrossEntropyLoss(),
                    optimizer=torch.optim.Adam(model.parameters(), lr=0.001),
                    dataloaders=dataloaders,
                    device=device,
                    )
Result:
Epoch 0/4
----------
0%| | 0/1563 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)
return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
100%|██████████| 1563/1563 [07:50<00:00, 3.32it/s]
train Loss: 75.5199 Acc: 3.2809
100%|██████████| 313/313 [00:38<00:00, 8.11it/s]
test Loss: 73.7274 Acc: 3.1949
Epoch 1/4
----------
100%|██████████| 1563/1563 [07:50<00:00, 3.33it/s]
train Loss: 73.8162 Acc: 3.2514
100%|██████████| 313/313 [00:38<00:00, 8.13it/s]
test Loss: 73.6114 Acc: 3.1949
Epoch 2/4
----------
100%|██████████| 1563/1563 [07:49<00:00, 3.33it/s]
train Loss: 73.7741 Acc: 3.1369
100%|██████████| 313/313 [00:38<00:00, 8.11it/s]
test Loss: 73.5873 Acc: 3.1949
Epoch 3/4
----------
100%|██████████| 1563/1563 [07:49<00:00, 3.33it/s]
train Loss: 73.7493 Acc: 3.1331
100%|██████████| 313/313 [00:38<00:00, 8.12it/s]
test Loss: 73.6191 Acc: 3.1949
Epoch 4/4
----------
100%|██████████| 1563/1563 [07:49<00:00, 3.33it/s]
train Loss: 73.7289 Acc: 3.1939
100%|██████████| 313/313 [00:38<00:00, 8.13it/s]
test Loss: 73.5955 Acc: 3.1949
Training complete in 42m 22s
Best val Acc: 3.194888
Tensorflow
Code:
import tensorflow_datasets as tfds
from tensorflow.keras import applications, models
import tensorflow as tf
import time

ds_test, ds_train = tfds.load('cifar10', split=['test', 'train'])

def resize(ip):
    image = ip['image']
    label = ip['label']
    image = tf.image.resize(image, (224, 224))
    image = tf.expand_dims(image, 0)
    label = tf.one_hot(label, 10)
    label = tf.expand_dims(label, 0)
    return (image, label)

ds_train_ = ds_train.map(resize)
ds_test_ = ds_test.map(resize)

model = applications.vgg16.VGG16(input_shape=(224, 224, 3), weights=None, classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

batch_size = 32
since = time.time()
history = model.fit(ds_train_,
                    batch_size=batch_size,
                    steps_per_epoch=len(ds_train) // batch_size,
                    epochs=5,
                    validation_steps=len(ds_test),
                    validation_data=ds_test_,
                    shuffle=True,)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
Result:
Epoch 1/5
1562/1562 [==============================] - 125s 69ms/step - loss: 36.9022 - accuracy: 0.1069 - val_loss: 2.3031 - val_accuracy: 0.1000
Epoch 2/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3031 - accuracy: 0.1005 - val_loss: 2.3033 - val_accuracy: 0.1000
Epoch 3/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3035 - accuracy: 0.1069 - val_loss: 2.3031 - val_accuracy: 0.1000
Epoch 4/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3038 - accuracy: 0.1024 - val_loss: 2.3030 - val_accuracy: 0.1000
Epoch 5/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3028 - accuracy: 0.1024 - val_loss: 2.3033 - val_accuracy: 0.1000
Training complete in 11m 23s
It is because in your TensorFlow code the data pipeline feeds a batch of 1 image into the model per step instead of a batch of 32 images.
Passing batch_size into model.fit does not control the batch size when the input is a tf.data dataset. The log only showed a seemingly correct number of steps per epoch because you passed steps_per_epoch into model.fit.
To correctly set the batch size:
ds_test, ds_train = tfds.load('cifar10', split=['test', 'train'])

def resize(ip):
    image = ip['image']
    label = ip['label']
    image = tf.image.resize(image, (224, 224))
    label = tf.one_hot(label, 10)
    return (image, label)

train_size = len(ds_train)
test_size = len(ds_test)
ds_train_ = ds_train.shuffle(train_size).batch(32).map(resize)
ds_test_ = ds_test.shuffle(test_size).batch(32).map(resize)
model.fit call:
history = model.fit(ds_train_,
                    epochs=1,
                    validation_data=ds_test_)
After fixing the problem, TensorFlow gets similar speed performance to PyTorch. On my machine, PyTorch took ~27 minutes per epoch while TensorFlow took ~24 minutes per epoch.
According to the benchmarks from NVIDIA, PyTorch and TensorFlow have similar speed performance in most popular deep learning applications with real-world datasets and problem sizes. (Reference: https://developer.nvidia.com/deep-learning-performance-training-inference)

Training CNN with Keras was fast, now it is many times slower

I was training a CNN with 120 thousand pictures, and it was fine: about 320 seconds per epoch.
3073/3073 [==============================] - 340s 110ms/step - loss: 0.4146 - accuracy: 0.8319 - val_loss: 0.3776 - val_accuracy: 0.8489
Epoch 2/20
3073/3073 [==============================] - 324s 105ms/step - loss: 0.3462 - accuracy: 0.8683 - val_loss: 0.3241 - val_accuracy: 0.8770
Epoch 3/20
3073/3073 [==============================] - 314s 102ms/step - loss: 0.3061 - accuracy: 0.8878 - val_loss: 0.2430 - val_accuracy: 0.9052
Epoch 4/20
3073/3073 [==============================] - 327s 107ms/step - loss: 0.2851 - accuracy: 0.8977 - val_loss: 0.2236 - val_accuracy: 0.9149
Epoch 5/20
3073/3073 [==============================] - 318s 104ms/step - loss: 0.2725 - accuracy: 0.9033 - val_loss: 0.2450 - val_accuracy: 0.9119
Epoch 6/20
3073/3073 [==============================] - 309s 101ms/step - loss: 0.2642 - accuracy: 0.9065 - val_loss: 0.2168 - val_accuracy: 0.9218
Epoch 7/20
3073/3073 [==============================] - 311s 101ms/step - loss: 0.2589 - accuracy: 0.9083 - val_loss: 0.1996 - val_accuracy: 0.9286
Epoch 8/20
3073/3073 [==============================] - 317s 103ms/step - loss: 0.2538 - accuracy: 0.9110 - val_loss: 0.2653 - val_accuracy: 0.9045
Epoch 9/20
3073/3073 [==============================] - 1346s 438ms/step - loss: 0.2497 - accuracy: 0.9116 - val_loss: 0.2353 - val_accuracy: 0.9219
Epoch 10/20
3073/3073 [==============================] - 1434s 467ms/step - loss: 0.2457 - accuracy: 0.9141 - val_loss: 0.1943 - val_accuracy: 0.9326
Then, after a few tests, it became 12x slower, with the same parameters. I thought it was because the CPU got too hot, but it never worked like before again. I tried resetting my Keras session, reinstalling Ubuntu 18.04, setting up my GPU, and installing through conda, but none of these worked. I also tried other code with different datasets, which is also slower. Quite frustrating.
There is no other symptom of a burnt-out CPU; everything runs as it always did.
Tensorflow 2.2.0
I would appreciate some help.
Edit: actually, if I wait one hour, it gets back to normal speed! Now I think it could be a problem with the first load of the images from disk into memory.
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.models import model_from_json
from keras.preprocessing import image
import os

#======= GPU
#os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  #disables GPU
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'   #verbose
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
from tensorflow.compat.v1.keras import backend as K
K.clear_session()
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
K.set_session(session)
#======= GPU

print(tf.__version__)

callbacks = [
    tf.keras.callbacks.EarlyStopping(
        # Stop training when `val_loss` is no longer improving
        monitor='val_accuracy',
        # "no longer improving" being defined as "no better than 1e-2 less"
        min_delta=1e-3,
        # "no longer improving" being further defined as "for at least 2 epochs"
        patience=2,
        verbose=1)
]

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

#flow_from_directory knows categories are divided by folders
training_set = train_datagen.flow_from_directory('dataset/training_set',  #applies a method to the object
                                                 target_size=(64, 64),
                                                 batch_size=32,  #minibatch
                                                 class_mode='categorical')
print(training_set.class_indices)

test_datagen = ImageDataGenerator(rescale=1./255)
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),  #has to be the same as that of the training set
                                            batch_size=32,
                                            class_mode='categorical')

cnn = tf.keras.models.Sequential()  #an ANN base
#convolution
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))  #convolutional layer
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))  #maxPooling
#new convolution
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'))  #remove input_shape
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Flatten())
cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))
cnn.add(tf.keras.layers.Dense(units=2, activation='softmax'))
cnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
print("Got here.")

cnn.fit(x=training_set, validation_data=test_set, epochs=20)

# serialize model to JSON
model_json = cnn.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
cnn.save_weights("model.h5")
print("Saved model to disk.")

Forward Pass calculation on current batch in "get_updates" method of Keras SGD Optimizer

I am trying to implement a stochastic Armijo rule in the get_updates method of the Keras SGD optimizer.
Therefore, I need to calculate an additional forward pass to check whether the chosen learning_rate was good. I don't want to recompute the gradients, but I do want to use the updated weights.
Using Keras Version 2.3.1 and Tensorflow Version 1.14.0
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]
    lr = self.learning_rate
    if self.initial_decay > 0:
        lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                  K.dtype(self.decay))))
    # momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape, name='moment_' + str(i))
               for (i, shape) in enumerate(shapes)]
    self.weights = [self.iterations] + moments
    for p, g, m in zip(params, grads, moments):
        v = self.momentum * m - lr * g  # velocity
        self.updates.append(K.update(m, v))
        if self.nesterov:
            new_p = p + self.momentum * v - lr * g
        else:
            new_p = p + v
        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)
        self.updates.append(K.update(p, new_p))

    ### own changes ###
    if self.armijo:
        inputs = (model._feed_inputs +
                  model._feed_targets +
                  model._feed_sample_weights)
        input_layer = model.layers[0].input
        armijo_function = K.function(inputs=input_layer, outputs=[loss],
                                     updates=self.updates, name='armijo')
        loss_next = armijo_function(inputs)
        [....change updates if learning rate was not good enough...]
    return self.updates
Unfortunately, I don't understand the error message when trying to calculate "loss_next":
tensorflow.python.framework.errors_impl.InvalidArgumentError: Requested Tensor connection between nodes "conv2d_1_input" and "conv2d_1_input" would create a cycle.
Two questions here:
How do I access the current batch I am working on? The forward calculation should only consider the current batch, since the gradients also belong only to that batch.
Are there better ideas than using K.function for applying the updates and evaluating a forward pass to calculate the loss function on that batch?
Can anyone help? Thanks in advance.
How do I access the current batch I am working on? The forward calculation should only consider the current batch, since the gradients also belong only to that batch.
For this you can set batch_size to the total number of training records in model.fit(), so that every epoch has just one forward pass and backpropagation. You can then analyze the gradients of epoch 1 and modify the learning rate for epoch 2, or, if you are using a custom training loop, modify the code accordingly.
Are there better ideas than using K.function for applying the updates and evaluating a forward pass to calculate the loss function on that batch?
I do not recall any other option for evaluating gradients apart from using from tensorflow.keras import backend as K in TensorFlow 1.x. The best option is to update TensorFlow to the latest version, 2.2.0, and use tf.GradientTape.
I would recommend going through this answer to capture gradients using from tensorflow.keras import backend as K in TensorFlow 1.x.
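As a rough sketch of that tf.GradientTape route (TF 2.x with eager execution; model, x_batch and y_batch are placeholder names, and the Armijo acceptance test itself is left out), the extra forward pass on the current batch can be evaluated directly on the model after the tentative update, without building a K.function:

import tensorflow as tf

loss_fn = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

def train_step(model, x_batch, y_batch):
    # Gradients for the current batch only.
    with tf.GradientTape() as tape:
        loss = loss_fn(y_batch, model(x_batch, training=True))
    grads = tape.gradient(loss, model.trainable_variables)

    # Keep a copy of the weights so a rejected step could be rolled back.
    backup = [v.numpy() for v in model.trainable_variables]

    # Tentatively apply the update, then re-evaluate the loss on the SAME
    # batch with the updated weights: a plain forward pass, no new gradients.
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    loss_next = loss_fn(y_batch, model(x_batch, training=True))

    # An Armijo-style check would go here: if loss_next is not small enough,
    # restore the backup weights (v.assign(b)) and retry with a smaller step.
    return loss, loss_next

The rollback loop and the actual step-size rule are omitted, since they depend on the exact Armijo condition you want.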
Below is sample code that is close to your requirement. I am using TensorFlow 2.2.0. You can build your requirements on top of this program.
The program does the following:
We alter the learning rate after every epoch. You can do that using the callbacks argument of model.fit. Here I increment the learning rate by 0.01 every epoch using tf.keras.callbacks.LearningRateScheduler, and also display it at the end of every epoch using tf.keras.callbacks.Callback.
We compute the gradient using tf.GradientTape() at the end of every epoch and collect the gradients of every epoch into a list using append.
I have also set batch_size=len(train_images) as per your requirement.
Note: I am training on just 500 records from the Cifar dataset due to memory constraints.
Code -
%tensorflow_version 2.x
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
import os
import numpy as np
import matplotlib.pyplot as plt

(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
train_images = train_images[:500]
train_labels = train_labels[:500]
test_images = test_images[:50]
test_labels = test_labels[:50]

model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(10)
])

lr = 0.01
adam = Adam(lr)

# Define the Gradient Function
epoch_gradient = []
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Define the Required Callback Function
class GradientCalcCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        with tf.GradientTape() as tape:
            logits = model(train_images, training=True)
            loss = loss_fn(train_labels, logits)
        grad = tape.gradient(loss, model.trainable_weights)
        model.optimizer.apply_gradients(zip(grad, model.trainable_variables))
        epoch_gradient.append(grad)

gradcalc = GradientCalcCallback()

# Define the Required Callback Function
class printlearningrate(tf.keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs={}):
        optimizer = self.model.optimizer
        lr = K.eval(optimizer.lr)
        Epoch_count = epoch + 1
        print('\n', "Epoch:", Epoch_count, ', LR: {:.2f}'.format(lr))

printlr = printlearningrate()

def scheduler(epoch):
    optimizer = model.optimizer
    return K.eval(optimizer.lr + 0.01)

updatelr = tf.keras.callbacks.LearningRateScheduler(scheduler)

model.compile(optimizer=adam,
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

epochs = 10
history = model.fit(train_images, train_labels, epochs=epochs, batch_size=len(train_images),
                    validation_data=(test_images, test_labels),
                    callbacks=[printlr, updatelr, gradcalc])

# Convert to a 2-dimensional array of (epoch, gradients) type
gradient = np.asarray(epoch_gradient)
print("Total number of epochs run:", epochs)
print("Gradient Array has the shape:", gradient.shape)
Output -
Epoch: 1 , LR: 0.01
Epoch 1/10
1/1 [==============================] - 0s 427ms/step - loss: 30.1399 - accuracy: 0.0820 - val_loss: 2114.8201 - val_accuracy: 0.1800 - lr: 0.0200
Epoch: 2 , LR: 0.02
Epoch 2/10
1/1 [==============================] - 0s 329ms/step - loss: 141.6176 - accuracy: 0.0920 - val_loss: 41.7008 - val_accuracy: 0.0400 - lr: 0.0300
Epoch: 3 , LR: 0.03
Epoch 3/10
1/1 [==============================] - 0s 328ms/step - loss: 4.1428 - accuracy: 0.1160 - val_loss: 2.3883 - val_accuracy: 0.1800 - lr: 0.0400
Epoch: 4 , LR: 0.04
Epoch 4/10
1/1 [==============================] - 0s 329ms/step - loss: 2.3545 - accuracy: 0.1060 - val_loss: 2.3471 - val_accuracy: 0.1800 - lr: 0.0500
Epoch: 5 , LR: 0.05
Epoch 5/10
1/1 [==============================] - 0s 340ms/step - loss: 2.3208 - accuracy: 0.1060 - val_loss: 2.3047 - val_accuracy: 0.1800 - lr: 0.0600
Epoch: 6 , LR: 0.06
Epoch 6/10
1/1 [==============================] - 0s 331ms/step - loss: 2.3048 - accuracy: 0.1300 - val_loss: 2.3069 - val_accuracy: 0.0600 - lr: 0.0700
Epoch: 7 , LR: 0.07
Epoch 7/10
1/1 [==============================] - 0s 337ms/step - loss: 2.3041 - accuracy: 0.1340 - val_loss: 2.3432 - val_accuracy: 0.0600 - lr: 0.0800
Epoch: 8 , LR: 0.08
Epoch 8/10
1/1 [==============================] - 0s 341ms/step - loss: 2.2871 - accuracy: 0.1400 - val_loss: 2.6009 - val_accuracy: 0.0800 - lr: 0.0900
Epoch: 9 , LR: 0.09
Epoch 9/10
1/1 [==============================] - 1s 515ms/step - loss: 2.2810 - accuracy: 0.1440 - val_loss: 2.8530 - val_accuracy: 0.0600 - lr: 0.1000
Epoch: 10 , LR: 0.10
Epoch 10/10
1/1 [==============================] - 0s 343ms/step - loss: 2.2954 - accuracy: 0.1300 - val_loss: 2.3049 - val_accuracy: 0.0600 - lr: 0.1100
Total number of epochs run: 10
Gradient Array has the shape: (10, 10)
Hope this answers your question. Happy Learning.

Significantly higher testing accuracy on mnist with keras than tensorflow.keras

I was verifying my TensorFlow (v2.2.0), CUDA (10.1), and cuDNN (libcudnn7-dev_7.6.5.32-1+cuda10.1_amd64.deb) installation with a basic example, and I'm getting weird results...
When running the following example in Keras, as shown at https://keras.io/examples/mnist_cnn/, I get ~99% validation accuracy. When I adapt the imports and run it via tensorflow.keras, I get only 86%.
I might be forgetting something.
To run using tensorflow:
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K

batch_size = 128
num_classes = 10
epochs = 12

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer=tf.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Sadly, I get the following output:
Epoch 2/12
469/469 [==============================] - 3s 6ms/step - loss: 2.2245 - accuracy: 0.2633 - val_loss: 2.1755 - val_accuracy: 0.4447
Epoch 3/12
469/469 [==============================] - 3s 7ms/step - loss: 2.1485 - accuracy: 0.3533 - val_loss: 2.0787 - val_accuracy: 0.5147
Epoch 4/12
469/469 [==============================] - 3s 6ms/step - loss: 2.0489 - accuracy: 0.4214 - val_loss: 1.9538 - val_accuracy: 0.6021
Epoch 5/12
469/469 [==============================] - 3s 6ms/step - loss: 1.9224 - accuracy: 0.4845 - val_loss: 1.7981 - val_accuracy: 0.6611
Epoch 6/12
469/469 [==============================] - 3s 6ms/step - loss: 1.7748 - accuracy: 0.5376 - val_loss: 1.6182 - val_accuracy: 0.7039
Epoch 7/12
469/469 [==============================] - 3s 6ms/step - loss: 1.6184 - accuracy: 0.5750 - val_loss: 1.4296 - val_accuracy: 0.7475
Epoch 8/12
469/469 [==============================] - 3s 7ms/step - loss: 1.4612 - accuracy: 0.6107 - val_loss: 1.2484 - val_accuracy: 0.7719
Epoch 9/12
469/469 [==============================] - 3s 6ms/step - loss: 1.3204 - accuracy: 0.6402 - val_loss: 1.0895 - val_accuracy: 0.7945
Epoch 10/12
469/469 [==============================] - 3s 6ms/step - loss: 1.2019 - accuracy: 0.6650 - val_loss: 0.9586 - val_accuracy: 0.8097
Epoch 11/12
469/469 [==============================] - 3s 7ms/step - loss: 1.1050 - accuracy: 0.6840 - val_loss: 0.8552 - val_accuracy: 0.8216
Epoch 12/12
469/469 [==============================] - 3s 7ms/step - loss: 1.0253 - accuracy: 0.7013 - val_loss: 0.7734 - val_accuracy: 0.8337
Test loss: 0.7734305262565613
Test accuracy: 0.8337000012397766
Nowhere near 99.25% as when I import Keras.
What am I missing?
Discrepancy in optimiser parameters between keras and tensorflow.keras
So the crux of the issue lies in the different default parameters for the Adadelta optimisers in Keras and Tensorflow. Specifically, the different learning rates. We can see this with a simple check. Using the Keras version of the code, print(keras.optimizers.Adadelta().get_config()) outputs
{'learning_rate': 1.0, 'rho': 0.95, 'decay': 0.0, 'epsilon': 1e-07}
And in the Tensorflow version, print(tf.optimizers.Adadelta().get_config()) gives us
{'name': 'Adadelta', 'learning_rate': 0.001, 'decay': 0.0, 'rho': 0.95, 'epsilon': 1e-07}
As we can see, there is a discrepancy between the learning rates for the Adadelta optimisers. Keras has a default learning rate of 1.0 while Tensorflow has a default learning rate of 0.001 (consistent with their other optimisers).
Effects of a higher learning rate
Since the Keras version of the Adadelta optimiser has a larger learning rate, it converges much faster and achieves a high accuracy within 12 epochs, while the Tensorflow Adadelta optimiser requires a longer training time. If you increased the number of training epochs, the Tensorflow model could potentially achieve a 99% accuracy as well.
The fix
But instead of increasing the training time, we can simply initialise the Tensorflow model to behave in a similar way to the Keras model by changing the learning rate of Adadelta to 1.0. i.e.
model.compile(
    loss=tf.keras.losses.categorical_crossentropy,
    optimizer=tf.optimizers.Adadelta(learning_rate=1.0),  # Note the new learning rate
    metrics=['accuracy'])
Making this change, we get the following performance running on Tensorflow:
Epoch 12/12
60000/60000 [==============================] - 102s 2ms/sample - loss: 0.0287 - accuracy: 0.9911 - val_loss: 0.0291 - val_accuracy: 0.9907
Test loss: 0.029134796149221757
Test accuracy: 0.9907
which is close to the desired 99.25% accuracy.
p.s. Incidentally, it seems that the different default parameters between Keras and Tensorflow are a known issue that was fixed but then reverted:
https://github.com/keras-team/keras/pull/12841 software development is hard.

Train accuracy improving but validation remains unchanged?

I am using TF 2.0. I was trying to train a network on my own data. It was not going well. The validation accuracy was close to 0 and stagnant. I tried many regularizations to no effect. Then I tried training a network on 3 classes of data where all images in each class are the same so as to eliminate the possibility of variability. But this is not working either. Since all in-class images are the same, I would expect the validation accuracy to perfectly match the training accuracy since there is no new data. Why is that not the case? Here is my code:
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.applications.mobilenet import preprocess_input
import matplotlib.pyplot as plt

base_model = tf.keras.applications.MobileNet(weights='imagenet', include_top=False)

def turn_off(n):
    for layer in model.layers[:n]:
        layer.trainable = False
    for layer in model.layers[n:]:
        layer.trainable = True

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)  # we add dense layers so that the model can learn more complex functions and classify for better results.
x = Dense(1024, activation='relu')(x)  # dense layer 2
x = Dense(512, activation='relu')(x)   # dense layer 3
preds = Dense(3, activation='softmax')(x)  # final layer with softmax activation
model = Model(inputs=base_model.input, outputs=preds)
turn_off(87)

train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   rescale=1. / 255,
                                   validation_split=0.2)  # set validation split

train_generator = train_datagen.flow_from_directory(
    '/users/josh.flori/desktop/colors/',
    target_size=(224, 224),
    batch_size=32,
    color_mode='rgb',
    class_mode='categorical',
    subset='training',
    shuffle=True)  # set as training data

validation_generator = train_datagen.flow_from_directory(
    '/users/josh.flori/desktop/colors/',
    target_size=(224, 224),
    batch_size=32,
    color_mode='rgb',
    class_mode='categorical',
    subset='validation',
    shuffle=True)  # set as validation data

# Adam optimizer
# loss function will be categorical cross entropy
# evaluation metric will be accuracy
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // train_generator.batch_size,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // train_generator.batch_size,
    epochs=6)
Here is the training output
9/9 [==============================] - 19s 2s/step - loss: 0.2645 - accuracy: 0.9134 - val_loss: 1.6668 - val_accuracy: 0.3438
Epoch 2/6
9/9 [==============================] - 20s 2s/step - loss: 0.0417 - accuracy: 0.9567 - val_loss: 2.6176 - val_accuracy: 0.3438
Epoch 3/6
9/9 [==============================] - 17s 2s/step - loss: 0.4771 - accuracy: 0.9422 - val_loss: 4.0694 - val_accuracy: 0.3438
Epoch 4/6
9/9 [==============================] - 18s 2s/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 2.1304 - val_accuracy: 0.3125
Epoch 5/6
9/9 [==============================] - 18s 2s/step - loss: 9.7658e-07 - accuracy: 1.0000 - val_loss: 3.1633 - val_accuracy: 0.3125
Epoch 6/6
9/9 [==============================] - 18s 2s/step - loss: 2.2571e-05 - accuracy: 1.0000 - val_loss: 3.4949 - val_accuracy: 0.3125
My image folders contain one folder per class, with exactly 128 identical images in each folder.
I've been reading all day and trying different images, but I can't seem to get anywhere. What is causing this particular behavior? It has to be something obvious, but I'm not sure what.