Classification with PyTorch is much slower than TensorFlow: 42 min vs. 11 min

I have been a TensorFlow user and am starting to use PyTorch. As a trial, I implemented simple classification tasks with both libraries.
However, PyTorch is much slower than TensorFlow: PyTorch takes 42 min while TensorFlow takes 11 min. I based my code on the official PyTorch tutorial and made only small changes to it.
Could anyone share some advice on this problem?
Here is a summary of what I tried.
environment: Colab Pro+
dataset: Cifar10
classifier: VGG16
optimizer: Adam
loss: crossentropy
batch size: 32
PyTorch
Code:
import torch, torchvision
from torch import nn
from torchvision import transforms, models
from tqdm import tqdm
import time, copy

trans = transforms.Compose([transforms.Resize((224, 224)),
                            transforms.ToTensor(),])

data = {phase: torchvision.datasets.CIFAR10('./', train=(phase == 'train'), transform=trans, download=True)
        for phase in ['train', 'test']}
dataloaders = {phase: torch.utils.data.DataLoader(data[phase], batch_size=32, shuffle=True)
               for phase in ['train', 'test']}

def train_model(model, criterion, optimizer, dataloaders, device, num_epochs=5):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in tqdm(iter(dataloaders[phase])):
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase])
            epoch_acc = running_corrects.double() / len(dataloaders[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = models.vgg16(pretrained=False)
model = model.to(device)
model = train_model(model=model,
                    criterion=nn.CrossEntropyLoss(),
                    optimizer=torch.optim.Adam(model.parameters(), lr=0.001),
                    dataloaders=dataloaders,
                    device=device,
                    )
Result:
Epoch 0/4
----------
0%| | 0/1563 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)
return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
100%|██████████| 1563/1563 [07:50<00:00, 3.32it/s]
train Loss: 75.5199 Acc: 3.2809
100%|██████████| 313/313 [00:38<00:00, 8.11it/s]
test Loss: 73.7274 Acc: 3.1949
Epoch 1/4
----------
100%|██████████| 1563/1563 [07:50<00:00, 3.33it/s]
train Loss: 73.8162 Acc: 3.2514
100%|██████████| 313/313 [00:38<00:00, 8.13it/s]
test Loss: 73.6114 Acc: 3.1949
Epoch 2/4
----------
100%|██████████| 1563/1563 [07:49<00:00, 3.33it/s]
train Loss: 73.7741 Acc: 3.1369
100%|██████████| 313/313 [00:38<00:00, 8.11it/s]
test Loss: 73.5873 Acc: 3.1949
Epoch 3/4
----------
100%|██████████| 1563/1563 [07:49<00:00, 3.33it/s]
train Loss: 73.7493 Acc: 3.1331
100%|██████████| 313/313 [00:38<00:00, 8.12it/s]
test Loss: 73.6191 Acc: 3.1949
Epoch 4/4
----------
100%|██████████| 1563/1563 [07:49<00:00, 3.33it/s]
train Loss: 73.7289 Acc: 3.1939
100%|██████████| 313/313 [00:38<00:00, 8.13it/s]
test Loss: 73.5955 Acc: 3.1949
Training complete in 42m 22s
Best val Acc: 3.194888
Tensorflow
Code:
import tensorflow_datasets as tfds
from tensorflow.keras import applications, models
import tensorflow as tf
import time

ds_test, ds_train = tfds.load('cifar10', split=['test', 'train'])

def resize(ip):
    image = ip['image']
    label = ip['label']
    image = tf.image.resize(image, (224, 224))
    image = tf.expand_dims(image, 0)
    label = tf.one_hot(label, 10)
    label = tf.expand_dims(label, 0)
    return (image, label)

ds_train_ = ds_train.map(resize)
ds_test_ = ds_test.map(resize)

model = applications.vgg16.VGG16(input_shape=(224, 224, 3), weights=None, classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

batch_size = 32
since = time.time()
history = model.fit(ds_train_,
                    batch_size=batch_size,
                    steps_per_epoch=len(ds_train)//batch_size,
                    epochs=5,
                    validation_steps=len(ds_test),
                    validation_data=ds_test_,
                    shuffle=True,)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
Result:
Epoch 1/5
1562/1562 [==============================] - 125s 69ms/step - loss: 36.9022 - accuracy: 0.1069 - val_loss: 2.3031 - val_accuracy: 0.1000
Epoch 2/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3031 - accuracy: 0.1005 - val_loss: 2.3033 - val_accuracy: 0.1000
Epoch 3/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3035 - accuracy: 0.1069 - val_loss: 2.3031 - val_accuracy: 0.1000
Epoch 4/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3038 - accuracy: 0.1024 - val_loss: 2.3030 - val_accuracy: 0.1000
Epoch 5/5
1562/1562 [==============================] - 129s 83ms/step - loss: 2.3028 - accuracy: 0.1024 - val_loss: 2.3033 - val_accuracy: 0.1000
Training complete in 11m 23s

It is because, in your TensorFlow code, the data pipeline is feeding a batch of 1 image into the model per step instead of a batch of 32 images.
Passing batch_size into model.fit does not actually control the batch size when the data comes as a tf.data dataset. The reason the log showed a seemingly correct number of steps per epoch is that you passed steps_per_epoch into model.fit.
To correctly set the batch size:
ds_test, ds_train = tfds.load('cifar10', split=['test', 'train'])

def resize(ip):
    image = ip['image']
    label = ip['label']
    image = tf.image.resize(image, (224, 224))
    label = tf.one_hot(label, 10)
    return (image, label)

train_size = len(ds_train)
test_size = len(ds_test)
ds_train_ = ds_train.shuffle(train_size).batch(32).map(resize)
ds_test_ = ds_test.shuffle(test_size).batch(32).map(resize)
model.fit call:
history = model.fit(ds_train_,
                    epochs=1,
                    validation_data=ds_test_)
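To confirm that the pipeline now yields 32-image batches, a quick check (not part of the original answer) is to inspect the dataset's element spec or the shape of one batch:

# Sanity check (illustrative): the batch dimension should now be 32 (None in the spec).
print(ds_train_.element_spec)             # expected: ((None, 224, 224, 3), (None, 10))
for images, labels in ds_train_.take(1):
    print(images.shape, labels.shape)     # expected: (32, 224, 224, 3) (32, 10)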
After fixing the problem, TensorFlow reached a speed similar to PyTorch. On my machine, PyTorch took ~27 minutes per epoch while TensorFlow took ~24 minutes per epoch.
According to benchmarks from NVIDIA, PyTorch and TensorFlow have similar speed in most popular deep learning applications with real-world datasets and problem sizes. (Reference: https://developer.nvidia.com/deep-learning-performance-training-inference)

Related

Is the loss function wrong in the following code for binary classification of images using soft labels? Or is there some other problem?

We are using a CNN to classify images with labels 0 and 1 in TensorFlow.
However, in reality the images have probability values between 0 and 1, not one-hot labels of 0 or 1. Images with probabilities in the range [0, 0.5) are labeled 0, and images in the range [0.5, 1.0] are labeled 1. I want to check whether classification performance is better if binary classification is performed using soft labels between 0 and 1 instead of one-hot labels.
The code below is an example of binary classification using only the data labeled 0 and 1 from the CIFAR-10 dataset.
In the code below, the accuracy is about 98% without the 'making soft labels' part, but about 48% with it.
Should I modify the BinaryCrossEntropy_custom function, which is the loss function, to solve the problem? Or is something else wrong?
This answer says that using logits solves it. I understand the soft_labels argument, but what value should I put in the logits argument in this example code?
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import optimizers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import vgg16
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K

# Rewrite the binary cross entropy function. We will modify this function to return a loss that fits the soft labels later.
def BinaryCrossEntropy_custom(y_true, y_pred):
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    term_0 = (1 - y_true) * K.log(1 - y_pred + K.epsilon())
    term_1 = y_true * K.log(y_pred + K.epsilon())
    return -K.mean(term_0 + term_1, axis=0)

(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0

# Only data with labels 0 and 1 are used.
train_ind01 = np.where((train_labels == 0) | (train_labels == 1))[0]
test_ind01 = np.where((test_labels == 0) | (test_labels == 1))[0]
train_images = train_images[train_ind01, :, :, :]
test_images = test_images[test_ind01, :, :, :]
train_labels = train_labels[train_ind01, :]
test_labels = test_labels[test_ind01, :]
train_labels = np.array(train_labels).astype('float64')
test_labels = np.array(test_labels).astype('float64')

# making soft labels part start
# Samples with label 0 are replaced with labels in the range [0, 0.2],
# and samples with label 1 are replaced by labels in the range [0.8, 1.0].
sampl_train = np.random.uniform(low=-0.2, high=0.2, size=train_labels.shape)
sampl_test = np.random.uniform(low=-0.2, high=0.2, size=test_labels.shape)
train_labels = train_labels + sampl_train
test_labels = test_labels + sampl_test
train_labels = np.clip(train_labels, 0.0, 1.0)
test_labels = np.clip(test_labels, 0.0, 1.0)
# making soft labels part end

vgg = vgg16.VGG16(include_top=False, weights='imagenet', input_shape=(32, 32, 3))
output = vgg.layers[-1].output
output = layers.Flatten()(output)
output = layers.Dense(512, activation='relu')(output)
output = layers.Dropout(0.2)(output)
output = layers.Dense(256, activation='relu')(output)
output = layers.Dropout(0.2)(output)
predictions = layers.Dense(units=1, activation="sigmoid")(output)
model = Model(inputs=vgg.input, outputs=predictions)
model.compile(optimizer=Adam(learning_rate=.0001), loss=BinaryCrossEntropy_custom, metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=100,
                    validation_data=(test_images, test_labels))

plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
The console output with the 'making soft labels' part is:
Epoch 1/100
2022-09-16 15:29:29.136931: I tensorflow/stream_executor/cuda/cuda_dnn.cc:366] Loaded cuDNN version 8101
313/313 [==============================] - 17s 42ms/step - loss: 0.2951 - accuracy: 0.4779 - val_loss: 0.2775 - val_accuracy: 0.4650
Epoch 2/100
313/313 [==============================] - 12s 38ms/step - loss: 0.2419 - accuracy: 0.4931 - val_loss: 0.2488 - val_accuracy: 0.4695
Epoch 3/100
313/313 [==============================] - 12s 39ms/step - loss: 0.2290 - accuracy: 0.4978 - val_loss: 0.2424 - val_accuracy: 0.4740
Epoch 4/100
313/313 [==============================] - 12s 39ms/step - loss: 0.2161 - accuracy: 0.5002 - val_loss: 0.2404 - val_accuracy: 0.4765
Epoch 5/100
313/313 [==============================] - 12s 39ms/step - loss: 0.2139 - accuracy: 0.5007 - val_loss: 0.2620 - val_accuracy: 0.4730
Epoch 6/100
313/313 [==============================] - 12s 38ms/step - loss: 0.2118 - accuracy: 0.5023 - val_loss: 0.2480 - val_accuracy: 0.4745
Epoch 7/100
313/313 [==============================] - 12s 38ms/step - loss: 0.2097 - accuracy: 0.5019 - val_loss: 0.2350 - val_accuracy: 0.4775
Epoch 8/100
313/313 [==============================] - 12s 39ms/step - loss: 0.2098 - accuracy: 0.5024 - val_loss: 0.2289 - val_accuracy: 0.4780
Epoch 9/100
313/313 [==============================] - 12s 38ms/step - loss: 0.2034 - accuracy: 0.5039 - val_loss: 0.2364 - val_accuracy: 0.4780
Epoch 10/100
313/313 [==============================] - 12s 39ms/step - loss: 0.2025 - accuracy: 0.5040 - val_loss: 0.2481 - val_accuracy: 0.4720

Why is the keras model less accurate and not recognized?

I downloaded the MNIST dataset (as JPG images) and created a model file (.h5).
Images were recognized using the .h5 model, but the recognition rate is low.
The accuracy is also low when I actually train and evaluate the model.
Did I do something wrong?
The images used are 28x28 grayscale.
import os
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import random

FILENAME = 'model.h5'
WIDTH = 28
HEIGHT = 28
EPOCHES = 100
BATCH_SIZE = 32

def create_dataset(img_folder):
    img_data_array = []
    class_name = []
    for path in os.listdir(img_folder):
        if path == ".DS_Store":
            continue
        for file in os.listdir(os.path.join(img_folder, path)):
            if file == ".DS_Store":
                continue
            image_path = os.path.join(img_folder, path, file)
            image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
            image = cv2.resize(image, (HEIGHT, WIDTH), interpolation=cv2.INTER_AREA)
            image = np.array(image)
            image = image.astype('float32')
            image /= 255
            img_data_array.append(image)
            class_name.append(path)
    return img_data_array, class_name

img_data, class_name = create_dataset(r'/Users/animalman/Documents/test/grayscale/train')
test, test_class_name = create_dataset(r'/Users/animalman/Documents/test/grayscale/test')

target_dict = {k: v for v, k in enumerate(np.unique(class_name))}
target_val = [target_dict[class_name[i]] for i in range(len(class_name))]
test_dict = {k: v for v, k in enumerate(np.unique(test_class_name))}
test_val = [test_dict[test_class_name[i]] for i in range(len(test_class_name))]

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# tensor
history = model.fit(x=tf.cast(np.array(img_data), tf.float64), y=tf.cast(list(map(int, target_val)), tf.int32),
                    epochs=EPOCHES, batch_size=BATCH_SIZE, validation_split=0.33)
evaluate = model.evaluate(x=tf.cast(np.array(img_data), tf.float64), y=tf.cast(list(map(int, target_val)), tf.int32),
                          batch_size=BATCH_SIZE)
print('Train:', evaluate)
test_evaluate = model.evaluate(x=tf.cast(np.array(test), tf.float64), y=tf.cast(list(map(int, test_val)), tf.int32),
                               batch_size=BATCH_SIZE)
print('Test:', test_evaluate)

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
test_loss, test_acc = model.evaluate(x_test, y_test)
print('mnist', test_acc)
model.save(FILENAME)
...
...
Epoch 98/100
1257/1257 [==============================] - 3s 2ms/step - loss: 5.5190e-08 - accuracy: 1.0000 - val_loss: 43.3440 - val_accuracy: 0.1135
Epoch 99/100
1257/1257 [==============================] - 3s 2ms/step - loss: 4.0746e-08 - accuracy: 1.0000 - val_loss: 43.3764 - val_accuracy: 0.1136
Epoch 100/100
1257/1257 [==============================] - 3s 2ms/step - loss: 2.3033e-08 - accuracy: 1.0000 - val_loss: 43.4628 - val_accuracy: 0.1136
..
..
Train: [14.343465805053711, 0.7074833512306213]
313/313 [==============================] - 0s 579us/step - loss: 14.7582 - accuracy: 0.6990
Test: [14.758186340332031, 0.6990000009536743]
313/313 [==============================] - 0s 850us/step - loss: 3887.2236 - accuracy: 0.6991
mnist : 0.6991000175476074
From here:
Epoch 100/100 1257/1257 [==============================] - 3s 2ms/step - loss: 2.3033e-08 - accuracy: 1.0000 - val_loss: 43.4628 - val_accuracy: 0.1136
You can see that the training accuracy is 1.0 while the validation accuracy is 0.1136
-> your model is overfitting to the training dataset.
About the overfitting: https://en.wikipedia.org/wiki/Overfitting
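As a rough illustration (not from the original answer), one common way to limit overfitting in Keras is to add dropout and stop training when the validation loss stops improving, e.g. with an EarlyStopping callback; the layer sizes below just mirror the question's model:

# Illustrative sketch only: dropout plus early stopping on the validation loss.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
                                              restore_best_weights=True)

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x=tf.cast(np.array(img_data), tf.float64),
                    y=tf.cast(list(map(int, target_val)), tf.int32),
                    epochs=EPOCHES, batch_size=BATCH_SIZE,
                    validation_split=0.33, callbacks=[early_stop])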

Tensorflow 2 Metrics produce wrong results with 2 GPUs

I took this piece of code from the TensorFlow documentation about distributed training with a custom loop, https://www.tensorflow.org/tutorials/distribute/custom_training, and I just adapted it to work with tf.keras.metrics.AUC and ran it on 2 GPUs (2 NVIDIA V100s from a DGX machine).
# Import TensorFlow
import tensorflow as tf
# Helper libraries
import numpy as np

print(tf.__version__)

fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# Adding a dimension to the array -> new shape == (28, 28, 1)
# We are doing this because the first layer in our model is a convolutional
# layer and it requires a 4D input (batch_size, height, width, channels).
# batch_size dimension will be added later on.
train_images = train_images[..., None]
test_images = test_images[..., None]

# One hot
train_labels = tf.keras.utils.to_categorical(train_labels, 10)
test_labels = tf.keras.utils.to_categorical(test_labels, 10)

# Getting the images in [0, 1] range.
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)

# If the list of devices is not specified in the
# `tf.distribute.MirroredStrategy` constructor, it will be auto-detected.
GPUS = [0, 1]
devices = ["/gpu:" + str(gpu_id) for gpu_id in GPUS]
strategy = tf.distribute.MirroredStrategy(devices=devices)

print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

BUFFER_SIZE = len(train_images)
BATCH_SIZE_PER_REPLICA = 64
GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 10

train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(BUFFER_SIZE).batch(GLOBAL_BATCH_SIZE)
test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(GLOBAL_BATCH_SIZE)

train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
test_dist_dataset = strategy.experimental_distribute_dataset(test_dataset)

def create_model():
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(64, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    return model

with strategy.scope():
    # Set reduction to `none` so we can do the reduction afterwards and divide by
    # global batch size.
    loss_object = tf.keras.losses.CategoricalCrossentropy(
        from_logits=True,
        reduction=tf.keras.losses.Reduction.NONE)
    def compute_loss(labels, predictions):
        per_example_loss = loss_object(labels, predictions)
        return tf.nn.compute_average_loss(per_example_loss, global_batch_size=GLOBAL_BATCH_SIZE)

with strategy.scope():
    test_loss = tf.keras.metrics.Mean(name='test_loss')
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')
    test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')
    train_auc = tf.keras.metrics.AUC(name='train_auc')
    test_auc = tf.keras.metrics.AUC(name='test_auc')

# model, optimizer, and checkpoint must be created under `strategy.scope`.
with strategy.scope():
    model = create_model()
    optimizer = tf.keras.optimizers.Adam()

def train_step(inputs):
    images, labels = inputs
    with tf.GradientTape() as tape:
        predictions = model(images, training=True)
        loss = compute_loss(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_accuracy(labels, predictions)
    train_auc(labels, predictions)
    return loss

def test_step(inputs):
    images, labels = inputs
    predictions = model(images, training=False)
    t_loss = loss_object(labels, predictions)
    test_loss.update_state(t_loss)
    test_accuracy(labels, predictions)
    test_auc(labels, predictions)

# `run` replicates the provided computation and runs it
# with the distributed input.
@tf.function
def distributed_train_step(dataset_inputs):
    per_replica_losses = strategy.run(train_step, args=(dataset_inputs,))
    return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,
                           axis=None)

@tf.function
def distributed_test_step(dataset_inputs):
    return strategy.run(test_step, args=(dataset_inputs,))

for epoch in range(EPOCHS):
    # TRAIN LOOP
    total_loss = 0.0
    num_batches = 0
    for x in train_dist_dataset:
        total_loss += distributed_train_step(x)
        num_batches += 1
    train_loss = total_loss / num_batches

    # TEST LOOP
    for x in test_dist_dataset:
        distributed_test_step(x)

    template = ("Epoch {}, Loss: {}, Accuracy: {}, AUC: {},"
                "Test Loss: {}, Test Accuracy: {}, Test AUC: {}")
    print(template.format(epoch + 1,
                          train_loss, train_accuracy.result() * 100, train_auc.result() * 100,
                          test_loss.result(), test_accuracy.result() * 100, test_auc.result() * 100))

    test_loss.reset_states()
    train_accuracy.reset_states()
    test_accuracy.reset_states()
    train_auc.reset_states()
    test_auc.reset_states()
The problem is that the AUC evaluation is definitely wrong, because it exceeds its range (it should be within 0-100 here, since it is multiplied by 100). These are the results I get from a single run of the above code:
Epoch 1, Loss: 1.8061423301696777, Accuracy: 66.00833892822266, AUC: 321.8688659667969,Test Loss: 1.742477536201477, Test Accuracy: 72.0999984741211, Test AUC: 331.33709716796875
Epoch 2, Loss: 1.7129968404769897, Accuracy: 74.9816665649414, AUC: 337.37017822265625,Test Loss: 1.7084736824035645, Test Accuracy: 75.52999877929688, Test AUC: 337.1878967285156
Epoch 3, Loss: 1.643971562385559, Accuracy: 81.83333587646484, AUC: 355.96209716796875,Test Loss: 1.6072628498077393, Test Accuracy: 85.3499984741211, Test AUC: 370.603759765625
Epoch 4, Loss: 1.5887378454208374, Accuracy: 87.27833557128906, AUC: 373.6204528808594,Test Loss: 1.5906082391738892, Test Accuracy: 87.13999938964844, Test AUC: 371.9998474121094
Epoch 5, Loss: 1.581775426864624, Accuracy: 88.0, AUC: 373.9468994140625,Test Loss: 1.5964380502700806, Test Accuracy: 86.68000030517578, Test AUC: 371.0227355957031
Epoch 6, Loss: 1.5764907598495483, Accuracy: 88.49166870117188, AUC: 375.2404479980469,Test Loss: 1.5832056999206543, Test Accuracy: 87.94000244140625, Test AUC: 373.41998291015625
Epoch 7, Loss: 1.5698528289794922, Accuracy: 89.19166564941406, AUC: 376.473876953125,Test Loss: 1.5770654678344727, Test Accuracy: 88.58000183105469, Test AUC: 375.5516662597656
Epoch 8, Loss: 1.564456820487976, Accuracy: 89.71833801269531, AUC: 377.8564758300781,Test Loss: 1.5792100429534912, Test Accuracy: 88.27000427246094, Test AUC: 373.1791687011719
Epoch 9, Loss: 1.5612279176712036, Accuracy: 90.02000427246094, AUC: 377.9949645996094,Test Loss: 1.5729509592056274, Test Accuracy: 88.9800033569336, Test AUC: 375.5257263183594
Epoch 10, Loss: 1.5562015771865845, Accuracy: 90.54000091552734, AUC: 378.9789123535156,Test Loss: 1.56815767288208, Test Accuracy: 89.3499984741211, Test AUC: 375.8636474609375
Accuracy is OK, but it seems to be the only metric that behaves well. I tried other metrics too, and they are not evaluated correctly either. The problem seems to appear when using more than one GPU: when I run this code with a single GPU, it produces the right results.
When you use a distributed strategy, the metrics must be constructed and used inside the strategy.scope() block. So when you want to call the metric.result() method, remember to put it inside a with strategy.scope() block:
with strategy.scope():
    print(metric.result())
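Applied to the training loop above, a minimal sketch (assuming the rest of the loop is unchanged) would look like this:

# Sketch: read and reset the metrics inside the strategy scope.
with strategy.scope():
    print(template.format(epoch + 1,
                          train_loss,
                          train_accuracy.result() * 100, train_auc.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100, test_auc.result() * 100))
    test_loss.reset_states()
    train_accuracy.reset_states()
    test_accuracy.reset_states()
    train_auc.reset_states()
    test_auc.reset_states()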

Forward Pass calculation on current batch in "get_updates" method of Keras SGD Optimizer

I am trying to implement a stochastic Armijo rule in the get_updates method of the Keras SGD optimizer.
Therefore, I need to calculate another forward pass to check if the learning_rate chosen was good. I don't want another calculation of the gradients, but I want to use the updated weights.
Using Keras Version 2.3.1 and Tensorflow Version 1.14.0
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]
    lr = self.learning_rate
    if self.initial_decay > 0:
        lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                  K.dtype(self.decay))))
    # momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape, name='moment_' + str(i))
               for (i, shape) in enumerate(shapes)]
    self.weights = [self.iterations] + moments
    for p, g, m in zip(params, grads, moments):
        v = self.momentum * m - lr * g  # velocity
        self.updates.append(K.update(m, v))
        if self.nesterov:
            new_p = p + self.momentum * v - lr * g
        else:
            new_p = p + v
        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)
        self.updates.append(K.update(p, new_p))

    ### own changes ###
    if self.armijo:
        inputs = (model._feed_inputs +
                  model._feed_targets +
                  model._feed_sample_weights)
        input_layer = model.layers[0].input
        armijo_function = K.function(inputs=input_layer, outputs=[loss],
                                     updates=self.updates, name='armijo')
        loss_next = armijo_function(inputs)
        [....change updates if learning rate was not good enough...]
    return self.updates
Unfortunately, I don't understand the error message when trying to calculate "loss_next":
tensorflow.python.framework.errors_impl.InvalidArgumentError: Requested Tensor connection between nodes "conv2d_1_input" and "conv2d_1_input" would create a cycle.
Two questions here:
How do I access the current batch I am working on? The forward calculation should only consider the current batch, since the gradients also belong only to that batch.
Are there any better ideas than using K.function for updating and evaluating a forward pass to calculate the loss function on that batch?
Can anyone help? Thanks in advance.
How do I access the current batch I am working on? The forward calculation should only consider the current batch, since the gradients also belong only to that batch.
For this you can use batch_size = total number of training records in model.fit(), so that every epoch has just one forward pass and backpropagation. Thus you can analyze the gradients of epoch 1 and modify the learning rate for epoch 2; or, if you are using a custom training loop, modify the code accordingly.
Are there any better ideas than using K.function for updating and evaluating a forward pass to calculate the loss function on that batch?
I do not recall any other option to evaluate gradients apart from using from tensorflow.keras import backend as K in TensorFlow version 1.x. The best option is to update TensorFlow to the latest version, 2.2.0, and use tf.GradientTape.
I would recommend going through this answer to capture gradients using from tensorflow.keras import backend as K in TensorFlow 1.x.
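For reference, a minimal TF 1.x-style sketch of that backend approach (illustrative only; x_batch and y_batch are hypothetical batch arrays, not from the original answer) could look like this:

# Sketch: evaluate gradients with the Keras backend (TF 1.x / Keras 2.3-style, private-API pattern).
from tensorflow.keras import backend as K
import numpy as np

grads = K.gradients(model.total_loss, model.trainable_weights)
inputs = model._feed_inputs + model._feed_targets + model._feed_sample_weights
get_gradients = K.function(inputs=inputs, outputs=grads)
grad_values = get_gradients([x_batch, y_batch, np.ones(len(x_batch))])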
Below is sample code that is close to your requirement. I am using TensorFlow version 2.2.0. You can build on this program for your own needs.
The program does the following:
We alter the learning rate after every epoch. You can do that using the callbacks argument of model.fit. Here I increment the learning rate by 0.01 every epoch using tf.keras.callbacks.LearningRateScheduler and also display it at the end of every epoch using a tf.keras.callbacks.Callback.
We compute the gradient using tf.GradientTape() at the end of every epoch and collect the gradients of every epoch into a list using append.
I have also set batch_size=len(train_images), as per your requirement.
Note: I am training on just 500 records from the CIFAR dataset due to memory constraints.
Code -
%tensorflow_version 2.x
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
import os
import numpy as np
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
train_images = train_images[:500]
train_labels = train_labels[:500]
test_images = test_images[:50]
test_labels = test_labels[:50]
model = Sequential([
Conv2D(16, 3, padding='same', activation='relu', input_shape=(32, 32, 3)),
MaxPooling2D(),
Conv2D(32, 3, padding='same', activation='relu'),
MaxPooling2D(),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(512, activation='relu'),
Dense(10)
])
lr = 0.01
adam = Adam(lr)
# Define the Gradient Fucntion
epoch_gradient = []
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Define the Required Callback Function
class GradientCalcCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
with tf.GradientTape() as tape:
logits = model(train_images, training=True)
loss = loss_fn(train_labels, logits)
grad = tape.gradient(loss, model.trainable_weights)
model.optimizer.apply_gradients(zip(grad, model.trainable_variables))
epoch_gradient.append(grad)
gradcalc = GradientCalcCallback()
# Define the Required Callback Function
class printlearningrate(tf.keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs={}):
optimizer = self.model.optimizer
lr = K.eval(optimizer.lr)
Epoch_count = epoch + 1
print('\n', "Epoch:", Epoch_count, ', LR: {:.2f}'.format(lr))
printlr = printlearningrate()
def scheduler(epoch):
optimizer = model.optimizer
return K.eval(optimizer.lr + 0.01)
updatelr = tf.keras.callbacks.LearningRateScheduler(scheduler)
model.compile(optimizer=adam,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
epochs = 10
history = model.fit(train_images, train_labels, epochs=epochs, batch_size=len(train_images),
validation_data=(test_images, test_labels),
callbacks = [printlr,updatelr,gradcalc])
# (7) Convert to a 2 dimensiaonal array of (epoch, gradients) type
gradient = np.asarray(epoch_gradient)
print("Total number of epochs run:", epochs)
print("Gradient Array has the shape:",gradient.shape)
Output -
Epoch: 1 , LR: 0.01
Epoch 1/10
1/1 [==============================] - 0s 427ms/step - loss: 30.1399 - accuracy: 0.0820 - val_loss: 2114.8201 - val_accuracy: 0.1800 - lr: 0.0200
Epoch: 2 , LR: 0.02
Epoch 2/10
1/1 [==============================] - 0s 329ms/step - loss: 141.6176 - accuracy: 0.0920 - val_loss: 41.7008 - val_accuracy: 0.0400 - lr: 0.0300
Epoch: 3 , LR: 0.03
Epoch 3/10
1/1 [==============================] - 0s 328ms/step - loss: 4.1428 - accuracy: 0.1160 - val_loss: 2.3883 - val_accuracy: 0.1800 - lr: 0.0400
Epoch: 4 , LR: 0.04
Epoch 4/10
1/1 [==============================] - 0s 329ms/step - loss: 2.3545 - accuracy: 0.1060 - val_loss: 2.3471 - val_accuracy: 0.1800 - lr: 0.0500
Epoch: 5 , LR: 0.05
Epoch 5/10
1/1 [==============================] - 0s 340ms/step - loss: 2.3208 - accuracy: 0.1060 - val_loss: 2.3047 - val_accuracy: 0.1800 - lr: 0.0600
Epoch: 6 , LR: 0.06
Epoch 6/10
1/1 [==============================] - 0s 331ms/step - loss: 2.3048 - accuracy: 0.1300 - val_loss: 2.3069 - val_accuracy: 0.0600 - lr: 0.0700
Epoch: 7 , LR: 0.07
Epoch 7/10
1/1 [==============================] - 0s 337ms/step - loss: 2.3041 - accuracy: 0.1340 - val_loss: 2.3432 - val_accuracy: 0.0600 - lr: 0.0800
Epoch: 8 , LR: 0.08
Epoch 8/10
1/1 [==============================] - 0s 341ms/step - loss: 2.2871 - accuracy: 0.1400 - val_loss: 2.6009 - val_accuracy: 0.0800 - lr: 0.0900
Epoch: 9 , LR: 0.09
Epoch 9/10
1/1 [==============================] - 1s 515ms/step - loss: 2.2810 - accuracy: 0.1440 - val_loss: 2.8530 - val_accuracy: 0.0600 - lr: 0.1000
Epoch: 10 , LR: 0.10
Epoch 10/10
1/1 [==============================] - 0s 343ms/step - loss: 2.2954 - accuracy: 0.1300 - val_loss: 2.3049 - val_accuracy: 0.0600 - lr: 0.1100
Total number of epochs run: 10
Gradient Array has the shape: (10, 10)
Hope this answers your question. Happy Learning.

model.fit_generator() fails with use_multiprocessing=True

In the code example below, I can train the model only when NOT using multiprocessing.
My generator is straight from the tensorflow.keras.utils.Sequence description https://www.tensorflow.org/api_docs/python/tf/keras/utils/Sequence
Any idea how to fix the generator to allow multiprocessing?
Running on Win 10, tensorflow 1.13.1, python 3.6.8
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.utils import Sequence

# Generator
class DataGenerator(Sequence):
    def __init__(self, dim, batch_size, n_channels):
        self.dim = dim
        self.batch_size = batch_size
        self.n_channels = n_channels

    def __len__(self):
        return 100

    def __getitem__(self, idx):
        X = np.random.randn(self.batch_size, self.dim, self.n_channels)
        Y = np.random.randn(self.batch_size, self.dim, 1)
        return X, Y

dim = 32
batch_size = 64
n_channels = 3

# Generators
training_generator = DataGenerator(dim, batch_size, n_channels)
validation_generator = DataGenerator(dim, batch_size, n_channels)

# Model
model = Sequential()
model.add(layers.GRU(128, return_sequences=True,
                     batch_input_shape=[None, training_generator.dim, training_generator.n_channels]))
model.add(layers.Dense(1))
model.compile(loss='mse', optimizer='adam')

# This training procedure runs
model.fit_generator(generator=training_generator,
                    epochs=2,
                    steps_per_epoch=100,
                    max_queue_size=32,
                    validation_data=validation_generator,
                    validation_steps=20,
                    verbose=1)

# This training procedure fails (only change is that I added the multiprocessing options)
model.fit_generator(generator=training_generator,
                    epochs=2,
                    steps_per_epoch=100,
                    max_queue_size=32,
                    validation_data=validation_generator,
                    validation_steps=20,
                    verbose=1,
                    use_multiprocessing=True,
                    workers=4)
I expected the second fit_generator() call to train the model like the first one. Instead, I get no output, not even an error message.
I tried your code on an Ubuntu 18.04.2 LTS machine with Python 3.6.8 and TensorFlow 1.13.1. It works in both cases, as the log below shows:
2019-07-13 12:56:17.003119: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally
100/100 [==============================] - 3s 27ms/step - loss: 0.9987
100/100 [==============================] - 10s 103ms/step - loss: 0.9973 - val_loss: 0.9987
Epoch 2/2
100/100 [==============================] - 3s 26ms/step - loss: 0.9955
100/100 [==============================] - 8s 83ms/step - loss: 1.0028 - val_loss: 0.9955
Multiprocessing=True ......
Epoch 1/2
100/100 [==============================] - 3s 32ms/step - loss: 0.9952
100/100 [==============================] - 9s 89ms/step - loss: 0.9962 - val_loss: 0.9952
Epoch 2/2
100/100 [==============================] - 3s 28ms/step - loss: 0.9967
100/100 [==============================] - 9s 86ms/step - loss: 0.9968 - val_loss: 0.9967
My suggestion is to first try CPU-only mode, by putting BOTH the model construction and the fit_generator call under "with tf.device('/cpu:0'):". If it works, the problem is likely GPU related, such as the driver or the TensorFlow GPU build. Most likely, the issue was caused by the GPU hanging.
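A rough sketch of that suggestion (illustrative only, reusing the model and generators from the question) could look like this:

# Illustrative sketch: force model construction and training onto the CPU
# to rule out GPU-related issues with multiprocessing.
with tf.device('/cpu:0'):
    model = Sequential()
    model.add(layers.GRU(128, return_sequences=True,
                         batch_input_shape=[None, training_generator.dim, training_generator.n_channels]))
    model.add(layers.Dense(1))
    model.compile(loss='mse', optimizer='adam')

    model.fit_generator(generator=training_generator,
                        epochs=2,
                        steps_per_epoch=100,
                        validation_data=validation_generator,
                        validation_steps=20,
                        verbose=1,
                        use_multiprocessing=True,
                        workers=4)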