I have trained a model in TensorFlow and am having trouble replicating it in PyTorch. The TensorFlow model achieves near 100% accuracy (the task is simple), but the PyTorch model performs at chance. I have spent a while trying to track this down and can't see what the problem could be.
The model is trained for the task of binary classification. Given an input utterance describing a quadrant and an (x, y, z) coordinate, the model has to predict whether the (x, z) portion of the coordinate lies in the quadrant described. For example, if the input text is "quadrant 1" and the coordinate is (0.5, -, 0.5), then the prediction should be true, but if the text is "quadrant 2" with the same coordinate, then the prediction should be false.
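For concreteness, the labeling rule is just a sign check on (x, z). Here is a sketch of that rule (my paraphrase, not the actual data-generation code; the quadrant numbering is assumed to follow the standard convention):

def in_quadrant(quadrant, x, z):
    # membership is decided by the signs of (x, z); y is ignored
    signs = {1: (1, 1), 2: (-1, 1), 3: (-1, -1), 4: (1, -1)}
    sx, sz = signs[quadrant]
    return x * sx > 0 and z * sz > 0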
I generated some data and trained the model in Tensorflow using this code:
x_data_placeholder = tf.placeholder(tf.float32, [FLAGS.batch_size, 1], name="x_data")
y_data_placeholder = tf.placeholder(tf.float32, [FLAGS.batch_size, 1], name="y_data")
z_data_placeholder = tf.placeholder(tf.float32, [FLAGS.batch_size, 1], name="z_data")
# text and labels placeholders
text_data = tf.placeholder(tf.int32, [FLAGS.batch_size, maxtextlength])
text_lengths = tf.placeholder(tf.int32, [FLAGS.batch_size])
y_labels_placeholder = tf.placeholder(tf.int64, [FLAGS.batch_size])
# encode text and coordinate
embeddings = tf.Variable(tf.random_uniform([100, embedding_size], -1, 1))  # uniform in [-1, 1]
rnn_inputs = tf.nn.embedding_lookup(embeddings, text_data)
rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size, initializer=tf.compat.v1.keras.initializers.glorot_normal) for size in [256]]
multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers, state_is_tuple=True)
text_outputs, text_fstate = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=rnn_inputs,
dtype=tf.float32, sequence_length=text_lengths)
# fully connected layers map the input coordinates into the same dimension as the LSTM output layer from above
x_output_layer = tf.compat.v1.layers.dense(x_data_placeholder, units=FLAGS.fc_column_size, activation=tf.nn.relu, name='x_coordinate')
y_output_layer = tf.compat.v1.layers.dense(y_data_placeholder, units=FLAGS.fc_column_size, activation=tf.nn.relu, name='y_coordinate')
z_output_layer = tf.compat.v1.layers.dense(z_data_placeholder, units=FLAGS.fc_column_size, activation=tf.nn.relu, name='z_coordinate')
# add the representations
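# (lstm_output_layer is assumed to be built from text_fstate, e.g. the final LSTM hidden state; its definition is omitted in this snippet)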
total_output_layer = x_output_layer + y_output_layer + z_output_layer + lstm_output_layer
# make the predictions with two fully connected layers
fc_1 = tf.compat.v1.layers.dense(total_output_layer, units=FLAGS.hidden_layer_size, activation=tf.nn.relu, name='fc_1')
logits = tf.compat.v1.layers.dense(fc_1, units=FLAGS.output_dims, activation=None, name='logits')
# train the model
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_labels_placeholder, logits=logits))
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, epsilon=1e-7)
gradients, variables = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.gradient_clip_threshold)
optimize = optimizer.apply_gradients(zip(gradients, variables))
# then it'll be trained with sess.run ...
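The loop itself is the usual feed_dict pattern, roughly like this (a sketch: batches is a hypothetical iterator over the generated data):

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for batch in batches:  # hypothetical batch iterator
        _, batch_loss = sess.run(
            [optimize, loss],
            feed_dict={x_data_placeholder: batch['x'],
                       y_data_placeholder: batch['y'],
                       z_data_placeholder: batch['z'],
                       text_data: batch['text'],
                       text_lengths: batch['lengths'],
                       y_labels_placeholder: batch['labels']})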
Now for the PyTorch replication:
class BaselineModel(nn.Module):
def __init__(self):
super(BaselineModel, self).__init__()
self.encode_x = nn.Linear(1, embed_size)
self.encode_y = nn.Linear(1, embed_size)
self.encode_z = nn.Linear(1, embed_size)
self._embeddings = nn.Embedding(vocab_size, self.embedding_table_size)
nn.init.uniform_(self._embeddings.weight, -1.0, 1.0)
self.num_layers = 1
self.rnn = nn.LSTM(self.embedding_table_size, self.hidden_size, batch_first=True)
self.fc_after_text_lstm = nn.Linear(self.hidden_size, 100)
self.fc = nn.Linear(100, 256)
self.fc_final = nn.Linear(256, 2)
self.relu_activation = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def init_hidden(self, batch_size, device='cuda:0'):
# LSTM initial states have shape (num_layers, batch_size, hidden_size); num_layers is 1 here
h_0 = torch.zeros(1, batch_size, self.hidden_size).to(device)
c_0 = torch.zeros(1, batch_size, self.hidden_size).to(device)
return h_0, c_0
def forward(self, input_text, x_coordinate=None, y_coordinate=None, z_coordinate=None):
x_embed = self.relu_activation(self.encode_x(x_coordinate.cuda().to(torch.float32)).cuda())
y_embed = self.relu_activation(self.encode_y(y_coordinate.cuda().to(torch.float32))).cuda()
z_embed = self.relu_activation(self.encode_z(z_coordinate.cuda().to(torch.float32))).cuda()
embeds = self._embeddings(input_text)
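# note: self.hidden is used below but is never set in this snippet; it must be initialized first, e.g. self.hidden = self.init_hidden(batch_size) in __init__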
embedding, hidden = self.rnn(embeds, self.hidden)
text_fc = self.relu_activation(self.fc_after_text_lstm(embedding[:, -1]))
representations_so_far_added = torch.sum(torch.stack([text_fc, x_embed, y_embed, z_embed]), dim=0)
pre_final_embedding = self.relu_activation(self.fc(representations_so_far_added))
return self.fc_final(pre_final_embedding)
### training code
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, eps=1e-7)
criterion = nn.CrossEntropyLoss()
for input_text, x_coordinate, y_coordinate, z_coordinate, targets in train_data:
optimizer.zero_grad()
pred = model(input_text, x_coordinate=x_coordinate, y_coordinate=y_coordinate, z_coordinate=z_coordinate)
loss = criterion(pred.float(), targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
optimizer.step()
scheduler.step()
# accuracy evaluation code; this is averaged over the entire epoch
pred_idx = F.log_softmax(pred, dim=1)
target_labels = targets.cpu().int()
pred_labels = torch.argmax(pred_idx, dim=-1).cpu().data.int()
curr_acc = skm.accuracy_score(target_labels, pred_labels)
If anyone can spot an issue with the PyTorch implementation, or can tell me what could be wrong, that would be much appreciated! I also tried loading the weights of the TensorFlow model into all the appropriate layers, and performance still struggles in PyTorch! Thanks in advance!
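(For reference, the weight loading was done along these lines, shown here for the x-coordinate layer; tf_kernel and tf_bias are hypothetical numpy arrays fetched from the TF session. TF dense kernels are stored as (in_features, out_features) while PyTorch nn.Linear stores (out_features, in_features), hence the transpose.)

with torch.no_grad():
    # transpose the TF kernel to match PyTorch's (out, in) layout
    model.encode_x.weight.copy_(torch.from_numpy(tf_kernel.T.copy()))
    model.encode_x.bias.copy_(torch.from_numpy(tf_bias))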
EDIT:
I have created a minimally reproducible example, because I still cannot figure out what the problem is. Any help would still be appreciated!
import torch
import torch.nn as nn
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
lr = 0.0005
n_epochs = 10
input_dim = 4
hidden_dim = 128
layer_dim = 2
output_dim = 2
batch_size = 50
class FeatureDataSet(torch.utils.data.Dataset):
def __init__(self, x_train, y_train, x_coordinates):
self.x_train = torch.tensor(x_train, dtype=torch.long)
self.y_train = torch.tensor(y_train)
self.x_coordinates = torch.tensor(x_coordinates, dtype=torch.float32)
def __len__(self):
return len(self.y_train)
def __getitem__(self, idx):
return self.x_train[idx], self.y_train[idx], self.x_coordinates[idx]
class RNN(nn.Module):
def __init__(self, input_dim, hidden_dim, layer_dim, output_dim, batch_size):
super().__init__()
self.hidden_dim = hidden_dim
self.layer_dim = layer_dim
# linear layer to encode the coordinate
self.encode_x = nn.Linear(1, hidden_dim).cuda()
self._embeddings = nn.Embedding(40, 100).cuda()
# hidden_dim is 128
# layer_dim is 2
self.lstm = nn.LSTM(100, hidden_dim, layer_dim, batch_first=True).cuda()
self.fc = nn.Linear(2 * hidden_dim, output_dim).cuda()
self.batch_size = batch_size
self.hidden = None
def init_hidden(self, x):
h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
return [t.cpu() for t in (h0, c0)]
def forward(self, x, x_coordinate):
#initializing the hidden states
h0, c0 = self.init_hidden(x)
embeds = self._embeddings(x)
out, (hn, cn) = self.lstm(embeds.cuda(), (h0.cuda(), c0.cuda()))
x_embed = F.relu(self.encode_x(x_coordinate.cuda().to(torch.float32)).cuda())
representations_so_far_added = torch.cat([out[:, -1, :], x_embed], dim=1)
out = self.fc(representations_so_far_added)
return out
model = RNN(input_dim, hidden_dim, layer_dim, output_dim, batch_size)
criterion = nn.CrossEntropyLoss()
opt = torch.optim.Adam(model.parameters(), lr=0.001)
print('Start model training')
import sklearn.metrics as skm
import torch.nn.functional as F
x_train = []
x_coordinates = []
y_train = []
for i in range(10000):
# create the data: if the sentence says "x coordinate > 0" (represented by [1, 5, 6, 8])
# and the coordinate actually is > 0, we should predict positive, else negative;
# the same applies when the sentence says "x coordinate < 0" ([1, 5, 6, 9])
if np.random.randint(0, 2) == 0:
if np.random.randint(0, 2) == 0:
# x coordinate > 0
x_train.append([1, 5, 6, 8])
x_coordinates.append([round(np.random.uniform(0.01, 1.00, 1)[0], 2)])
y_train.append(1.0)
else:
# sentence says x coordinate > 0, but the coordinate is <= 0 (negative example)
x_train.append([1, 5, 6, 8])
x_coordinates.append([round(np.random.uniform(-1.00, 0.00, 1)[0], 2)])
y_train.append(0.0)
else:
if np.random.randint(0, 2) == 0:
# x coordinate < 0
x_train.append([1, 5, 6, 9])
x_coordinates.append([round(np.random.uniform(-1.00, 0.00, 1)[0], 2)])
y_train.append(1.0)
else:
# sentence says x coordinate < 0, but the coordinate is > 0 (negative example)
x_train.append([1, 5, 6, 9])
x_coordinates.append([round(np.random.uniform(0.01, 1.00, 1)[0], 2)])
y_train.append(0.0)
# print a sample of data
print(x_train[:10])
print(y_train[:10])
print(x_coordinates[:10])
# create a dataloader
trainingDataset = FeatureDataSet(x_train=x_train, y_train=y_train, x_coordinates=x_coordinates)
train_loader = torch.utils.data.DataLoader(dataset=trainingDataset, batch_size=batch_size, shuffle=True)
# for each epoch
for epoch in range(1, n_epochs + 1):
acc_all = []
# each batch
for i, (x_batch, y_batch, x_coord_batch) in enumerate(train_loader):
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
x_coord_batch = x_coord_batch.to(device)
opt.zero_grad()
# pass in the text (x_batch) and coordinate (x_coord_batch)
out = model(x_batch, x_coordinate=x_coord_batch)
loss = criterion(out.float(), y_batch.type(torch.LongTensor).cuda())
loss.backward()
opt.step()
pred_idx = F.log_softmax(out, dim=1)
target_labels = y_batch.cpu().int()
pred_labels = torch.argmax(pred_idx, dim=-1).cpu().data.int()
curr_acc = skm.accuracy_score(target_labels, pred_labels)
acc_all.append(curr_acc)
print(np.mean(acc_all))
I suspect there are some mistakes in the dataset implementation in your PyTorch version.
I tried your PyTorch BaselineModel on both the dataset in your "minimally reproducible example" and my own dataset constructed according to your description, and found that it works fine.
The following is my code for testing on my own dataset. Note that I added several hyperparameters to the BaselineModel code to make it run. I got accuracy over 99%.
import random
import torch
import torch.nn as nn
import numpy as np
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
lr = 0.0005
n_epochs = 100
input_dim = 4
hidden_dim = 128
layer_dim = 2
output_dim = 2
batch_size = 50
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class FeatureDataSet(torch.utils.data.Dataset):
def __init__(self, x_train, y_train, x_coordinates, y_coordinates, z_coordinates):
self.x_train = torch.tensor(x_train, dtype=torch.long)
self.y_train = torch.tensor(y_train)
self.x_coordinates = torch.tensor(x_coordinates, dtype=torch.float32)
self.y_coordinates = torch.tensor(y_coordinates, dtype=torch.float32)
self.z_coordinates = torch.tensor(z_coordinates, dtype=torch.float32)
def __len__(self):
return len(self.y_train)
def __getitem__(self, idx):
return self.x_train[idx], self.y_train[idx], self.x_coordinates[idx], self.y_coordinates[idx], self.z_coordinates[idx]
class BaselineModel(nn.Module):
def __init__(self):
super(BaselineModel, self).__init__()
vocab_size = 40
self.hidden_size = 100
self.embedding_table_size = self.hidden_size
self.encode_x = nn.Linear(1, self.hidden_size)
self.encode_y = nn.Linear(1, self.hidden_size)
self.encode_z = nn.Linear(1, self.hidden_size)
self._embeddings = nn.Embedding(vocab_size, self.embedding_table_size)
nn.init.uniform_(self._embeddings.weight, -1.0, 1.0)
self.num_layers = 1
self.rnn = nn.LSTM(self.embedding_table_size, self.hidden_size, batch_first=True)
self.fc_after_text_lstm = nn.Linear(self.hidden_size, 100)
self.fc = nn.Linear(100, 256)
self.fc_final = nn.Linear(256, 2)
self.relu_activation = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
self.hidden = self.init_hidden(batch_size)
def init_hidden(self, batch_size, device='cuda:0'):
# for LSTM, we need # of layers
h_0 = torch.zeros(1, batch_size, self.hidden_size).to(device)
c_0 = torch.zeros(1, batch_size, self.hidden_size).to(device)
return h_0, c_0
def forward(self, input_text, x_coordinate=None, y_coordinate=None, z_coordinate=None):
x_embed = self.relu_activation(self.encode_x(x_coordinate.cuda().to(torch.float32)).cuda())
y_embed = self.relu_activation(self.encode_y(y_coordinate.cuda().to(torch.float32))).cuda()
z_embed = self.relu_activation(self.encode_z(z_coordinate.cuda().to(torch.float32))).cuda()
embeds = self._embeddings(input_text)
embedding, hidden = self.rnn(embeds, self.hidden)
text_fc = self.relu_activation(self.fc_after_text_lstm(embedding[:, -1]))
representations_so_far_added = torch.sum(torch.stack([text_fc, x_embed, y_embed, z_embed]), dim=0)
pre_final_embedding = self.relu_activation(self.fc(representations_so_far_added))
return self.fc_final(pre_final_embedding)
# model = RNN(input_dim, hidden_dim, layer_dim, output_dim, batch_size)
model = BaselineModel().cuda()
criterion = nn.CrossEntropyLoss()
opt = torch.optim.Adam(model.parameters(), lr=0.001)
print('Start model training')
import sklearn.metrics as skm
import torch.nn.functional as F
x_train = []
x_coordinates = []
y_coordinates = []
z_coordinates = []
y_train = []
for i in range(10000):
x_coordinate = round(np.random.uniform(-1, 1.00, 1)[0], 2)
y_coordinate = round(np.random.uniform(-1, 1.00, 1)[0], 2)
z_coordinate = round(np.random.uniform(-1, 1.00, 1)[0], 2)
x_coordinates.append([x_coordinate])
y_coordinates.append([y_coordinate])
z_coordinates.append([z_coordinate])
if np.random.randint(0, 2) == 0: # positive example
if x_coordinate <= 0 and z_coordinate <= 0:
x_train.append([1, 5, 6, 8])
elif x_coordinate <= 0 and z_coordinate > 0:
x_train.append([1, 5, 6, 9])
elif x_coordinate > 0 and z_coordinate <= 0:
x_train.append([1, 5, 6, 10])
elif x_coordinate > 0 and z_coordinate > 0:
x_train.append([1, 5, 6, 11])
y_train.append(1.0)
else:
if x_coordinate <= 0 and z_coordinate <= 0:
x_train.append(random.choice([[1, 5, 6, 9], [1, 5, 6, 10], [1, 5, 6, 11]]))
elif x_coordinate <= 0 and z_coordinate > 0:
x_train.append(random.choice([[1, 5, 6, 8], [1, 5, 6, 10], [1, 5, 6, 11]]))
elif x_coordinate > 0 and z_coordinate <= 0:
x_train.append(random.choice([[1, 5, 6, 8], [1, 5, 6, 9], [1, 5, 6, 11]]))
elif x_coordinate > 0 and z_coordinate > 0:
x_train.append(random.choice([[1, 5, 6, 8], [1, 5, 6, 9], [1, 5, 6, 10]]))
y_train.append(0.0)
# print a sample of data
print(x_train[:10])
print(y_train[:10])
print(x_coordinates[:10])
print(y_coordinates[:10])
print(z_coordinates[:10])
# create a dataloader
trainingDataset = FeatureDataSet(x_train=x_train, y_train=y_train, x_coordinates=x_coordinates, y_coordinates=y_coordinates, z_coordinates=z_coordinates)
train_loader = torch.utils.data.DataLoader(dataset=trainingDataset, batch_size=batch_size, shuffle=True)
# for each epoch
loss_meter = AverageMeter()
for epoch in range(1, n_epochs + 1):
acc_all = []
# each batch
loss_meter.reset()
for i, (x_batch, y_batch, x_coord_batch, y_coord_batch, z_coord_batch) in enumerate(train_loader):
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
x_coord_batch = x_coord_batch.to(device)
y_coord_batch = y_coord_batch.to(device)
z_coord_batch = z_coord_batch.to(device)
opt.zero_grad()
# pass in the text (x_batch) and coordinate (x_coord_batch)
out = model(x_batch, x_coordinate=x_coord_batch, y_coordinate=y_coord_batch, z_coordinate=z_coord_batch)
loss = criterion(out.float(), y_batch.type(torch.LongTensor).cuda())
loss.backward()
opt.step()
pred_idx = F.log_softmax(out, dim=1)
target_labels = y_batch.cpu().int()
pred_labels = torch.argmax(pred_idx, dim=-1).cpu().data.int()
curr_acc = skm.accuracy_score(target_labels, pred_labels)
acc_all.append(curr_acc)
loss_meter.update(loss.item())
print(np.mean(acc_all))
print("loss is %f" % loss_meter.val)
As for the "minimally reproducible example", I think the model RNN doesn't work is quite reasonable, as I have stated in the comments. I suppose that tensorflow can not fit as well, although I have not tried it. Your "minimally reproducible example" may be unrelated to your main problem.
Hi, I'm a newbie in deep learning, and I tried to build my own object detection model, MobileNetV3 SSDLite, trained on the Pascal VOC 2007 dataset from TensorFlow Datasets (tfds).
However, its performance is poor (mAP 0.3~0.4), and I wonder why. The possible reasons I thought of:
1. Training data problem
# I use the tfds Pascal VOC data
# train set
(train_dataset, train_dataset2), ds_info = tfds.load(name="voc/2007", split=["train", "validation"], with_info=True)
train_dataset = train_dataset.concatenate(train_dataset2)
#test set
val_dataset = tfds.load(name="voc/2007", split="test", with_info=False)
Is there any difference between the tfds voc/2007 and the official VOC 2007 dataset?
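A quick sanity check I can run is to inspect ds_info (a sketch; the expected counts are the official VOC 2007 split sizes):

print(ds_info.splits['train'].num_examples)       # should be 2501
print(ds_info.splits['validation'].num_examples)  # should be 2510
print(ds_info.splits['test'].num_examples)        # should be 4952
print(ds_info.features)                           # feature/label layout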
2. Detection model problem
I use an ImageNet-pretrained MobileNetV3-Large as the backbone and extract features from the ("multiply_11", "multiply_17") layers, whose resolutions are 19x19 and 10x10:
input_tensor = Input((300, 300, 3))
backbone = tf.keras.applications.MobileNetV3Large(include_top=False, alpha=0.75, input_tensor = input_tensor, input_shape = (300, 300, 3))
And I extract extra feature maps as:
def InvertedResidualBlock(filters, kernel_size, strides, padding):
f1 = Conv2D(filters=filters//2, kernel_size=1, strides = 1, padding=padding, kernel_regularizer=l2(4e-5))
f2 = BatchNormalization()
f3 = ReLU(6.)
f4 = SeparableConv2D(filters = filters,
kernel_size =
kernel_size,
strides = strides,
padding=padding,
#depthwise_regularizer = l2(4e-5),
#pointwise_regularizer = l2(4e-5)
)
f5 = BatchNormalization()
f6 = ReLU(6.)
return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), (f1, f2, f3, f4, f5, f6))
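# (the reduce above just composes f1..f6 into a single callable that applies
# them in order: block(x) == f6(f5(f4(f3(f2(f1(x)))))))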
class HFPNeckBuilder():
def __init__(self, config) -> None:
self.isLite = config["model_config"]["neck"]["isLite"]
if self.isLite:
self.baseConvBlock = SeparableConvBlock
self.baseConv = SeparableConv
else:
self.baseConvBlock = ConvBlock
self.baseConv = Conv
def __call__(self, ex_stage_output):
Feature_map1 = ex_stage_output[0]
Feature_map2 = ex_stage_output[-1]
Feature_map3 = InvertedResidualBlock(filters= 512, strides = 2, kernel_size = 3, padding="same")(Feature_map2)
Feature_map4 = InvertedResidualBlock(filters= 256, strides = 2, kernel_size = 3, padding="same")(Feature_map3)
Feature_map5 = InvertedResidualBlock(filters= 256, strides = 2, kernel_size = 3, padding="same")(Feature_map4)
Feature_map6 = InvertedResidualBlock(filters= 128, strides = 2, kernel_size = 3, padding="same")(Feature_map5)
return [Feature_map1, Feature_map2, Feature_map3, Feature_map4, Feature_map5, Feature_map6]
Is there any problem with the backbone and neck (the feature extractor mentioned in the MobileNetV2 paper)?
3. Optimizer and LR schedule problem
I have experimented with all combinations of the settings below:
Batch size: 32
Optimizer: SGD (momentum 0.9), Adam, RAdam, RAdam+LookAhead, RAdam+LookAhead+GradientCentralize
LR schedule: cosine decay, cosine restarts, cosine warmup-and-decay, with initial learning rates from 0.1 to 1e-4
Epochs: 150 ~ 5000
But all experiments show poor mAP (0.3~0.4).
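For reference, the cosine-decay variant looks roughly like this in tf.keras (a sketch; the decay_steps value is a placeholder that depends on steps_per_epoch and the number of epochs):

schedule = tf.keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=0.01, decay_steps=100000)
optimizer = tf.keras.optimizers.SGD(learning_rate=schedule, momentum=0.9)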
4. Loss function problem
I tried two loss functions: hard negative mining + smooth L1 loss, and focal loss + smooth L1 loss. I referred to the Keras official example code for the focal loss and to pierluigiferrari's GitHub for the hard negative mining.
Here is my focal loss code:
class SSDLoss(tf.losses.Loss):
'''
Loss with focal loss rather than hard negative mining, adapted from the Keras reference.
Gamma sharpens the difference between good and bad detections; if gamma == 0 this reduces to cross-entropy.
Alpha is a weighting factor; with alpha = 0.25, background is weighted 0.25 and foreground 0.75.
'''
def __init__(self, num_classes=80, alpha=0.25, gamma=2.0, config = None):
super(SSDLoss, self).__init__(reduction="auto", name="SSDLoss")
self.alpha = alpha
self.gamma = gamma
self._num_classes = num_classes
self.anchor_boxes = AnchorBox(config).get_anchors()[None, ...]
self._box_variance = [0.1, 0.1, 0.2, 0.2]
def call(self, y_true, y_pred):
y_pred = tf.cast(y_pred, dtype=tf.float32)
box_labels = y_true[:, :, :4]
box_predictions = y_pred[:, :, :4]
cls_labels = tf.one_hot(
tf.cast(y_true[:, :, 4], dtype=tf.int32),
depth=self._num_classes,
dtype=tf.float32,
)
cls_predictions = y_pred[:, :, 4:]
positive_mask = tf.cast(tf.greater(y_true[:, :, 4], -1.0), dtype=tf.float32)
ignore_mask = tf.cast(tf.equal(y_true[:, :, 4], -2.0), dtype=tf.float32)
clf_loss = self.Focal_ClassificationLoss(cls_labels, cls_predictions)
box_loss = self.SmoothL1_BoxLoss(box_labels, box_predictions)
clf_loss = tf.where(tf.equal(ignore_mask, 1.0), 0.0, clf_loss)
box_loss = tf.where(tf.equal(positive_mask, 1.0), box_loss, 0.0)
normalizer = tf.reduce_sum(positive_mask, axis=-1)
clf_loss = tf.math.divide_no_nan(tf.reduce_sum(clf_loss, axis=-1), normalizer)
box_loss = tf.math.divide_no_nan(tf.reduce_sum(box_loss, axis=-1), normalizer)
loss = clf_loss + box_loss
return loss
def SmoothL1_BoxLoss(self, y_true_Box, y_pred_box):
difference = y_true_Box - y_pred_box
absolute_difference = tf.abs(difference)
squared_difference = 0.5 * difference ** 2
# standard smooth L1: 0.5 * d^2 when |d| < 1, otherwise |d| - 0.5
loss = tf.where(
tf.less(absolute_difference, 1.0),
squared_difference,
absolute_difference - 0.5)
return tf.reduce_sum(loss, axis=-1)
def Focal_ClassificationLoss(self, y_true_Cls, y_pred_Cls):
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true_Cls, logits=y_pred_Cls)
probs = tf.nn.sigmoid(y_pred_Cls)
alpha = tf.where(tf.equal(y_true_Cls, 1.0), self.alpha, (1.0 - self.alpha))
pt = tf.where(tf.equal(y_true_Cls, 1.0), probs, 1 - probs)
loss = alpha * tf.pow(1.0 - pt, self.gamma) * cross_entropy
return tf.reduce_sum(loss, axis=-1)
I changed it a little bit from the keras official website example code in "Object Detection with RetinaNet".
Question
Is there any advice for improving the mAP to 0.6~0.7? Thank you for reading my question.
I am trying to use TensorFlow 2.0's new MirroredStrategy, but I am receiving an error saying:
ValueError: We currently do not support distribution strategy with a `Sequential` model that is created without `input_shape`/`input_dim` set in its first layer or a subclassed model.
Model:
class Model(kr.Model):
def __init__(self, input_shape, conv_sizes, num_outputs):
super().__init__('model_1')
self.num_outputs = num_outputs
rows, cols, depth = input_shape
self.one_hot = kl.Lambda(lambda x: tf.one_hot(tf.cast(x, 'int32'), num_outputs), input_shape=(rows, cols))
self.concat = kl.Concatenate(axis=-1)
vision_layers = []
for i, (filters, kernel, stride) in enumerate(conv_sizes):
if not i:
depth += num_outputs - 1
vision_layers += [kl.Conv2D(filters, kernel, stride, activation='relu',
input_shape=(rows, cols, depth))]
else:
vision_layers += [kl.Conv2D(filters, kernel, stride, activation='relu')]
vision_layers += [kl.MaxPool2D(pool_size=(2, 2))]
flatten = kl.Flatten()
dense = kl.Dense(num_outputs)
self.net = kr.Sequential(vision_layers+[flatten]+[dense])
self.build(input_shape=(None, ) + input_shape)
def call(self, inputs):
one_hot = self.one_hot(inputs[:, :, :, -1])
return self.net(self.concat([inputs[:, :, :, :-1], one_hot]))
Reproduction code:
model_args = {'conv_sizes': [(32, (2, 2), 1), (32, (2, 2), 1), (32, (2, 2), 1)],
'input_shape': (50, 50, 6),
'num_outputs': 5}
def dummy_loss(values, targets):
return tf.reduce_sum(values-targets, axis=-1)
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = Model(**model_args)
model.compile(optimizer=kr.optimizers.Adam(learning_rate=0.01), loss=dummy_loss)
Output:
Traceback (most recent call last):
File "/home/joao/anaconda3/envs/tf2/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3296, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-4-dc492e7c638b>", line 18, in <module>
model.compile(optimizer=kr.optimizers.Adam(learning_rate=0.01), loss=dummy_loss)
File "/home/joao/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py", line 456, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/joao/anaconda3/envs/tf2/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 263, in compile
'We currently do not support distribution strategy with a '
ValueError: We currently do not support distribution strategy with a `Sequential` model that is created without `input_shape`/`input_dim` set in its first layer or a subclassed model.
Model Summary (model.summary()):
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lambda (Lambda) multiple 0
_________________________________________________________________
concatenate (Concatenate) multiple 0
_________________________________________________________________
sequential (Sequential) (None, 5) 13573
=================================================================
Total params: 13,573
Trainable params: 13,573
Non-trainable params: 0
I would do away with the Sequential approach and use the Model class directly:
def create_model(input_shape, conv_sizes, fc_sizes, num_outputs):
num_outputs = num_outputs
rows, cols, depth = input_shape
input_layer = kl.Input(shape=(rows, cols, depth))
actions = tf.slice(input_layer, [0, 0, 0, depth - 1], [-1, rows, cols, 1])
non_actions = tf.slice(input_layer, [0, 0, 0, 0], [-1, rows, cols, depth - 1])
one_hot = kl.Lambda(lambda x: tf.one_hot(tf.cast(x, 'int32'), num_outputs),
input_shape=(rows, cols))(actions)
concat = kl.Concatenate(axis=-1)([non_actions, tf.reshape(one_hot, (-1, rows, cols, num_outputs))])
vision_layer = concat
for i, (filters, kernel, stride) in enumerate(conv_sizes):
vision_layer = kl.Conv2D(filters, kernel, stride, activation='relu')(vision_layer)
vision_layer = kl.MaxPool2D(pool_size=(2, 2))(vision_layer)
flatten = kl.Flatten()(vision_layer)
dense = kl.Dense(num_outputs)(flatten)
return kr.Model(inputs=input_layer, outputs=[dense])
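Building and compiling it inside the strategy scope then works the same way as in your reproduction code (a sketch reusing your model_args values; fc_sizes is unused by the function above, so I pass an empty list):

mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
    model = create_model(input_shape=(50, 50, 6),
                         conv_sizes=[(32, (2, 2), 1)] * 3,
                         fc_sizes=[],
                         num_outputs=5)
    model.compile(optimizer=kr.optimizers.Adam(learning_rate=0.01),
                  loss=dummy_loss)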
I have the following model in tensorflow:
def output_layer(input_layer, num_labels):
'''
:param input_layer: 2D tensor
:param num_labels: int. How many output labels in total? (10 for cifar10 and 100 for cifar100)
:return: output layer Y = WX + B
'''
input_dim = input_layer.get_shape().as_list()[-1]
fc_w = create_variables(name='fc_weights', shape=[input_dim, num_labels],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
fc_b = create_variables(name='fc_bias', shape=[num_labels], initializer=tf.zeros_initializer())
fc_h = tf.matmul(input_layer, fc_w) + fc_b
return fc_h
def model(input_features):
with tf.variable_scope("GRU"):
cell1 = tf.nn.rnn_cell.GRUCell(gru1_cell_size)
cell2 = tf.nn.rnn_cell.GRUCell(gru2_cell_size)
mcell = tf.nn.rnn_cell.MultiRNNCell([cell1, cell2], state_is_tuple=False)
# shape=(?, 64 + 32)
initial_state = tf.placeholder(shape=[None, gru1_cell_size + gru2_cell_size], dtype=tf.float32, name="initial_state")
output, new_state = tf.nn.dynamic_rnn(mcell, input_features, dtype=tf.float32, initial_state=initial_state)
with tf.variable_scope("output_reshaped"):
# before, shape: (34, 1768, 32), after, shape: (34 * 1768, 32)
output = tf.reshape(output, shape=[-1, gru2_cell_size])
with tf.variable_scope("output_layer"):
# shape: (34 * 1768, 3)
predictions = output_layer(output, num_labels)
predictions = tf.reshape(predictions, shape=[-1, 100, 3])
return predictions, initial_state, new_state, output
As we can see from the code, the cell size of the first GRU is 64 and the cell size of the second GRU is 32. The batch size is 34 (but this is not important for me now), and the size of the input features is 200. I have tried computing the gradients of the loss with respect to the trainable variables through:
local_grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())
# only the gradients are taken to add them later with the back propagated gradients from previous batch.
local_grads = [grad for grad, var in local_grads_and_vars]
for v in local_grads:
print("v", v)
After printing out the grads I got the following:
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/MatMul/Enter_grad/b_acc_3:0", shape=(264, 128), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/BiasAdd/Enter_grad/b_acc_3:0", shape=(128,), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/MatMul_1/Enter_grad/b_acc_3:0", shape=(264, 64), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer1/rnn/while/gru_cell/BiasAdd_1/Enter_grad/b_acc_3:0", shape=(64,), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/MatMul/Enter_grad/b_acc_3:0", shape=(96, 64), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/BiasAdd/Enter_grad/b_acc_3:0", shape=(64,), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/MatMul_1/Enter_grad/b_acc_3:0", shape=(96, 32), dtype=float32)
v Tensor("Optimizer/gradients/GRU_Layer2/rnn/while/gru_cell/BiasAdd_1/Enter_grad/b_acc_3:0", shape=(32,), dtype=float32)
v Tensor("Optimizer/gradients/output_layer/MatMul_grad/tuple/control_dependency_1:0", shape=(32, 3), dtype=float32)
v Tensor("Optimizer/gradients/output_layer/add_grad/tuple/control_dependency_1:0", shape=(3,), dtype=float32)
Assume that I saved the gradients after training the model on the first batch, that is, after feeding a tensor of shape (34, 100, 200) as input_features (the model function argument) and getting an output of shape (34 * 100, 3). How do I back-propagate these gradients on the second mini-batch?
From the documentation of tf.gradients:
grad_ys is a list of tensors of the same length as ys that holds the initial gradients for each y in ys. When grad_ys is None, we fill in a tensor of '1's of the shape of y for each y in ys. A user can provide their own initial grad_ys to compute the derivatives using a different initial gradient for each y (e.g., if one wanted to weight the gradient differently for each value in each y).
So your grad_ys should be a list with the same length as the input ys.
Copying your code I was able to get the following to run:
prev_grad_pl = [tf.placeholder(tf.float32, [batch, i]) for i in [64, 32]]
prev_grad_init = {l: np.ones(l.get_shape().as_list()) for l in prev_grad_pl}
prev_grads_val__ = tf.gradients([new_state1, new_state2], [initial_state1, initial_state2], grad_ys=prev_grad_pl)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed = {initial_state1: np.zeros([batch, gru1_cell_size]),
initial_state2: np.zeros([batch, gru2_cell_size])}
for k in prev_grad_init:
feed[k] = prev_grad_init[k]
grad1, grad2 = sess.run(prev_grads_val__, feed_dict=feed)
Here is the solution with custom code:
import tensorflow as tf
import numpy as np
cell_size = 32
seq_length = 1000
time_steps1 = 500
time_steps2 = seq_length - time_steps1
x_t = np.arange(1, seq_length + 1)
x_t_plus_1 = np.arange(2, seq_length + 2)
tf.set_random_seed(123)
m_dtype = tf.float32
input_1 = tf.placeholder(dtype=m_dtype, shape=[None, time_steps1, 1], name="input_1")
input_2 = tf.placeholder(dtype=m_dtype, shape=[None, time_steps2, 1], name="input_2")
labels1 = tf.placeholder(dtype=m_dtype, shape=[None, time_steps1, 1], name="labels_1")
labels2 = tf.placeholder(dtype=m_dtype, shape=[None, time_steps2, 1], name="labels_2")
labels = tf.concat([labels1, labels2], axis=1, name="labels")
def model(input_feat1, input_feat2):
with tf.variable_scope("GRU"):
cell1 = tf.nn.rnn_cell.GRUCell(cell_size)
cell2 = tf.nn.rnn_cell.GRUCell(cell_size)
initial_state = tf.placeholder(shape=[None, cell_size], dtype=m_dtype, name="initial_state")
with tf.variable_scope("First50"):
# output1: shape=[1, time_steps1, 32]
output1, new_state1 = tf.nn.dynamic_rnn(cell1, input_feat1, dtype=m_dtype, initial_state=initial_state)
with tf.variable_scope("Second50"):
# output2: shape=[1, time_steps2, 32]
output2, new_state2 = tf.nn.dynamic_rnn(cell2, input_feat2, dtype=m_dtype, initial_state=new_state1)
with tf.variable_scope("output"):
# output shape: [1, time_steps1 + time_steps2, 32] => [1, 1000, 32]
output = tf.concat([output1, output2], axis=1)
output = tf.reshape(output, shape=[-1, cell_size])
output = tf.layers.dense(output, units=1)
output = tf.reshape(output, shape=[1, time_steps1 + time_steps2, 1])
with tf.variable_scope("outputs_1_2_reshaped"):
output1 = tf.slice(input_=output, begin=[0, 0, 0], size=[-1, time_steps1, -1])
output2 = tf.slice(input_=output, begin=[0, time_steps1, 0], size=[-1, time_steps2, 1])
print(output.get_shape().as_list(), "1")
print(output1.get_shape().as_list(), "2")
print(output2.get_shape().as_list(), "3")
return output, output1, output2, initial_state, new_state1, new_state2
def loss(output, output1, output2, labels, labels1, labels2):
loss = tf.reduce_sum(tf.sqrt(tf.square(output - labels)))
loss1 = tf.reduce_sum(tf.sqrt(tf.square(output1 - labels1)))
loss2 = tf.reduce_sum(tf.sqrt(tf.square(output2 - labels2)))
return loss, loss1, loss2
def optimize(loss, loss1, loss2, initial_state, new_state1, new_state2):
with tf.name_scope('Optimizer'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
grads1 = tf.gradients(loss2, new_state1)
grads2 = tf.gradients(loss1, initial_state)
grads3 = tf.gradients(new_state1, initial_state, grad_ys=grads1)
grads_wrt_initial_state_1 = tf.add(grads2, grads3)
grads_wrt_initial_state_2 = tf.gradients(loss, initial_state, grad_ys=None)
return grads_wrt_initial_state_1, grads_wrt_initial_state_2
output, output1, output2, initial_state, new_state1, new_state2 = model(input_1, input_2)
loss, loss1, loss2 = loss(output, output1, output2, labels, labels1, labels2)
grads_wrt_initial_state_1, grads_wrt_initial_state_2 = optimize(loss, loss1, loss2, initial_state, new_state1, new_state2)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
in1 = np.reshape(x_t[:time_steps1], newshape=(1, time_steps1, 1))
in2 = np.reshape(x_t[time_steps1:], newshape=(1, time_steps2, 1))
l1 = np.reshape(x_t_plus_1[:time_steps1], newshape=(1, time_steps1, 1))
l2 = np.reshape(x_t_plus_1[time_steps1:], newshape=(1, time_steps2, 1))
i_s = np.zeros([1, cell_size])
t1, t2 = sess.run([grads_wrt_initial_state_1, grads_wrt_initial_state_2], feed_dict={input_1: in1,
input_2: in2,
labels1: l1,
labels2: l2,
initial_state: i_s})
print(np.mean(t1), np.mean(t2))
print(np.sum(t1), np.sum(t2))
This is an example of two GRUs, one after the other, with back-propagation done in two different ways according to the code in optimize(). By the chain rule the two should agree: grads_wrt_initial_state_1 assembles the total gradient as d(loss1)/d(initial_state) + d(loss2)/d(new_state1) * d(new_state1)/d(initial_state), while grads_wrt_initial_state_2 lets TensorFlow differentiate the full loss with respect to initial_state directly.
tf.trainable_variables() returns a list of all trainable variable objects. When an object from the list is passed to an op, such as tf.nn.l2_loss, TensorFlow is able to cast the object as a Tensor and perform the necessary calculations. However, passing the same object to a user defined function throws an error.
Consider the following two layer network to work with:
# Generate random data
x_train = np.random.rand(64, 16, 16, 4)  # 4 channels, matching the input_x placeholder below
y_train = np.random.randint(0, 5, 64)
one_hot = np.zeros((len(y_train), 5))
one_hot[list(np.indices((len(y_train),))) + [y_train]] = 1
y_train = one_hot
# Model definition
class FeedForward(object):
def __init__(self, l2_lambda=0.01):
self.x = tf.placeholder(tf.float32, shape=[None, 16, 16, 4], name="input_x")
self.y = tf.placeholder(tf.float32, [None, 5], name="input_y")
l2_loss = tf.constant(0.0)
with tf.name_scope("conv1"):
kernel_shape=[1, 1, 4, 4]
w = tf.Variable(tf.truncated_normal(kernel_shape, stddev=0.1), name="weight")
conv1 = tf.nn.conv2d(self.x, w, strides=[1, 1, 1, 1], padding="SAME", name="conv")
with tf.name_scope("conv2"):
kernel_shape=[1, 1, 4, 2]
w = tf.Variable(tf.truncated_normal(kernel_shape, stddev=0.1), name="weight")
conv2 = tf.nn.conv2d(conv1, w, strides=[1, 1, 1, 1], padding="SAME", name="conv")
out = tf.contrib.layers.flatten(conv2)
with tf.name_scope("output"):
kernel_shape=[out.get_shape()[1].value, 5]
w = tf.Variable(tf.truncated_normal(kernel_shape, stddev=0.1), name="weight")
self.scores = tf.matmul(out, w, name="scores")
predictions = tf.argmax(self.scores, axis=1, name="predictions")
# L2 Regularizer
if l2_lambda > 0.:
l2_loss = tf.add_n([self.some_norm(var) for var in tf.trainable_variables() if ("weight" in var.name)])
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.y)
self.loss = tf.reduce_mean(losses) + (l2_lambda * l2_loss)
correct_predictions = tf.equal(predictions, tf.argmax(self.y, axis=1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def some_norm(w):
# operate on w and return scalar
# (only) for example
return (1 / tf.nn.l2_loss(w))
with tf.Graph().as_default():
sess = tf.Session()
with sess.as_default():
ffn = FeedForward()
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
grads_and_vars = optimizer.compute_gradients(ffn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)  # defines the train_op used in train_step
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
feed_dict = {
ffn.x: x_batch,
ffn.y: y_batch,
}
_, step, loss, accuracy = sess.run([train_op, global_step, ffn.loss, ffn.accuracy], feed_dict)
print("step {}, loss {:g}, acc {:g}".format(step, loss, accuracy))
batch_size = 32
n_epochs = 4
s_idx = - batch_size
for batch_index in range(n_epochs):
s_idx += batch_size
e_idx = s_idx + batch_size
x_batch = x_train[s_idx:e_idx]
y_batch = y_train[s_idx:e_idx]
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
The problem here is that on passing the trainable variable to some_norm(), it is passed as an object and can not be operated on. The related error message encountered at the first line inside some_norm() is:
Failed to convert object of type <class '__main__.FeedForward'> to Tensor.
Contents: <__main__.FeedForward object at 0x7fefde7e97b8>.
Consider casting elements to a supported type.
Is there a way to cast the object returned by tf.trainable_variables() as a tensor or is there a possible workaround such as passing a reference?
How is using the above different from using l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()...]) which works just fine?
You forgot the self argument in your some_norm implementation, def some_norm(w):, so calling self.some_norm(var) tries to convert your instance of the class (self) to a tensor.
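A minimal sketch of the fix:

def some_norm(self, w):
    # with self declared, w now binds to the variable passed in,
    # not to the FeedForward instance
    return (1 / tf.nn.l2_loss(w))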