Regularization with customized training in tf.keras (TF 2.0)

I have a Keras model with regularization applied via kernel_regularizer. In a customized training loop that uses GradientTape, does optimizer.apply_gradients apply the regularization loss automatically? If not, the following code shows an implementation. Is that a good way to apply regularization? Am I doing it the right way?
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.regularizers import l2

def GetModel():
    inputs = Input(shape=(784,), name='digits')
    x = Dense(64, activation='relu', kernel_regularizer=l2(1e-4))(inputs)
    x = Dense(64, activation='relu', kernel_regularizer=l2(1e-4))(x)
    outputs = Dense(10, name='predictions')(x)
    model = Model(inputs=inputs, outputs=outputs)
    return model

mymodel = GetModel()

@tf.function
def train_step(x, y_true, optimizer):
    with tf.GradientTape() as tape:
        y_pred = mymodel(x, training=True)
        # my_custom_loss_function is defined elsewhere
        loss_training = my_custom_loss_function(y_true, y_pred)
        loss_regularization = tf.math.add_n(mymodel.losses)
        loss_total = loss_training + loss_regularization
    gradients = tape.gradient(loss_total, mymodel.trainable_variables)
    optimizer.apply_gradients(zip(gradients, mymodel.trainable_variables))
    return loss_total
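For reference, model.losses on a Functional model collects exactly the penalties created by kernel_regularizer, which is what the snippet above sums. A minimal sanity check, assuming the GetModel definition from the question:

import tensorflow as tf

# Sanity check (assumes GetModel() from above): model.losses should contain
# one l2 penalty per regularized Dense kernel, and summing it should equal
# evaluating the regularizers by hand.
model = GetModel()
manual = tf.add_n([layer.kernel_regularizer(layer.kernel)
                   for layer in model.layers
                   if getattr(layer, 'kernel_regularizer', None) is not None])
collected = tf.add_n(model.losses)
print(float(manual), float(collected))  # expected to print the same value twice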

Related

Transformer didn't work well with tensorflow gradient tape

I implemented a transformer with TensorFlow 2. The model works well when I train it with model.fit(dataset).
However, when I train the model with tf.GradientTape and evaluate it, the model yields the blank-space token for all inputs. Here is my code; the TensorFlow version is 2.7.0.
def loss_function(y_true, y_pred):
    y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))
    loss = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')(y_true, y_pred)
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    loss = tf.multiply(loss, mask)
    return tf.reduce_mean(loss)

for epoch in range(num_epochs):
    for step, data in enumerate(dataset):
        enc_inputs, dec_inputs, outputs = data[0]['inputs'], data[0]['dec_inputs'], data[1]['outputs']
        with tf.GradientTape() as tape:
            logits = model([enc_inputs, dec_inputs], training=True)
            loss = loss_function(outputs, logits)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
I think there is no problem with my transformer model code, because it works well with model.fit(dataset). What's wrong with my code?
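Not a confirmed diagnosis, but one place where a hand-written loop often drifts from model.fit is the reduction of the masked loss: the tf.reduce_mean above averages over padded positions as well. A hedged sketch that normalizes by the number of non-padding tokens instead (MAX_LENGTH is the question's own constant):

import tensorflow as tf

# Sketch only: same masking as the question's loss_function, but averaged
# over real tokens rather than over every (possibly padded) position.
def masked_loss(y_true, y_pred):
    y_true = tf.reshape(y_true, shape=(-1, MAX_LENGTH - 1))
    per_token = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')(y_true, y_pred)
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    return tf.reduce_sum(per_token * mask) / tf.reduce_sum(mask)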

How to apply Monte Carlo Dropout, in tensorflow, for an LSTM if batch normalization is part of the model?

I have a model composed of 3 LSTM layers followed by a batch norm layer and finally a dense layer. Here is the code:
def build_uncomplied_model(hparams):
    inputs = tf.keras.Input(shape=(None, hparams["n_features"]))
    x = return_RNN(hparams["rnn_type"])(hparams["cell_size_1"], return_sequences=True, recurrent_dropout=hparams['dropout'])(inputs)
    x = return_RNN(hparams["rnn_type"])(hparams["cell_size_2"], return_sequences=True)(x)
    x = return_RNN(hparams["rnn_type"])(hparams["cell_size_3"], return_sequences=True)(x)
    x = layers.BatchNormalization()(x)
    outputs = layers.TimeDistributed(layers.Dense(hparams["n_features"]))(x)
    model = tf.keras.Model(inputs, outputs, name=RNN_type + "_model")
    return model
Now I am aware that to apply MCDropout, we can apply the following code:
y_predict = np.stack([my_model(X_test, training=True) for x in range(100)])
y_proba = y_predict.mean(axis=0)
However, setting training=True also puts the batch norm layer in training mode, so it normalizes the test data with batch statistics and updates its moving averages, which is not what I want.
Additionally, building a custom Dropout layer while setting training to True isn't a solution in my case because I am using LSTM.
class MCDropout(tf.keras.layers.Dropout):
    def call(self, inputs):
        return super().call(inputs, training=True)
Any help is much appreciated!!
A possible solution could be to create a custom LSTM layer. You should override the call method to force the training flag to be True
class MCLSTM(keras.layers.LSTM):
    def __init__(self, units, **kwargs):
        super(MCLSTM, self).__init__(units, **kwargs)

    def call(self, inputs, mask=None, training=None, initial_state=None):
        return super(MCLSTM, self).call(
            inputs,
            mask=mask,
            training=True,
            initial_state=initial_state,
        )
Then you can use it in your code
def build_uncomplied_model(hparams):
    inputs = tf.keras.Input(shape=(None, hparams["n_features"]))
    x = MCLSTM(hparams["cell_size_1"], return_sequences=True, recurrent_dropout=hparams['dropout'])(inputs)
    x = return_RNN(hparams["rnn_type"])(hparams["cell_size_2"], return_sequences=True)(x)
    x = return_RNN(hparams["rnn_type"])(hparams["cell_size_3"], return_sequences=True)(x)
    x = layers.BatchNormalization()(x)
    outputs = layers.TimeDistributed(layers.Dense(hparams["n_features"]))(x)
    model = tf.keras.Model(inputs, outputs, name=RNN_type + "_model")
    return model
or add it to your return_RNN factory (a more elegant way)
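A possible sketch of that factory; return_RNN is the asker's own helper, so its real signature is an assumption here:

from tensorflow import keras

# Hypothetical factory: hands back the Monte Carlo LSTM defined above when
# requested, otherwise the stock Keras recurrent layers.
def return_RNN(rnn_type, monte_carlo=False):
    if rnn_type == "LSTM":
        return MCLSTM if monte_carlo else keras.layers.LSTM
    if rnn_type == "GRU":
        return keras.layers.GRU
    return keras.layers.SimpleRNN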
===== EDIT =====
Another solution could be to add the training flag when creating the model. Something like this:
def build_uncomplied_model(hparams):
    inputs = tf.keras.Input(shape=(None, hparams["n_features"]))
    # This is the Monte Carlo LSTM
    x = LSTM(hparams["cell_size_1"], return_sequences=True, recurrent_dropout=hparams['dropout'])(inputs, training=True)
    x = return_RNN(hparams["rnn_type"])(hparams["cell_size_2"], return_sequences=True)(x)
    x = return_RNN(hparams["rnn_type"])(hparams["cell_size_3"], return_sequences=True)(x)
    x = layers.BatchNormalization()(x)
    outputs = layers.TimeDistributed(layers.Dense(hparams["n_features"]))(x)
    model = tf.keras.Model(inputs, outputs, name=RNN_type + "_model")
    return model
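As a usage note (a sketch, with X_test taken from the question's snippet): once training=True is baked into the LSTM call, inference can stay in the default mode, so batch normalization keeps using its moving statistics while the recurrent dropout remains stochastic:

import numpy as np

# Monte Carlo prediction: repeated stochastic forward passes, averaged.
y_samples = np.stack([model(X_test, training=False) for _ in range(100)])
y_proba = y_samples.mean(axis=0)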

Weighted Absolute Error implementation doesn't work in tensorflow (keras)

I have created a custom loss (weighted absolute error) in Keras, but the implementation doesn't work: I get the error ValueError: No gradients provided for any variable: ['my_model/conv2d/kernel:0', 'my_model/conv2d/bias:0'].
I want to apply a different weight to each pixel.
class WeightedMeanAbsoluteError(tf.keras.metrics.Metric):
    def __init__(self, name='weighted_mean_absolute_error'):
        super(WeightedMeanAbsoluteError, self).__init__(name=name)
        self.wmae = self.add_weight(name='wmae', initializer='zeros')

    def update_state(self, y_true, y_pred, loss_weights):
        values = tf.math.abs(y_true - y_pred) * loss_weights
        return self.wmae.assign_add(tf.reduce_sum(values))

    def result(self):
        return self.wmae

    def reset_states(self):
        # The state of the metric will be reset at the start of each epoch.
        self.wmae.assign(0.)

loss_object = WeightedMeanAbsoluteError()
train_loss = WeightedMeanAbsoluteError()
I use the following code to implement a training step:
@tf.function
def train_step(input_images, output_images):
    with tf.GradientTape() as tape:
        # training=True is only needed if there are layers with different
        # behavior during training versus inference (e.g. Dropout).
        result_images = model(input_images, training=True)
        loss = loss_object(output_images, result_images)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
Also my code works just fine if I use
loss_object = tf.keras.losses.MeanAbsoluteError()
train_loss = tf.keras.metrics.MeanAbsoluteError()
The best and simplest way to minimize a weighted standard loss (such as MAE) is to use the sample_weight parameter of the fit method, where we pass an array with the desired weight for each sample:
import numpy as np
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense

X = np.random.uniform(0, 1, (1000, 50))
y = np.random.uniform(0, 1, 1000)
W = np.random.randint(1, 10, 1000)

inp = Input((50,))
x = Dense(64, activation='relu')(inp)
out = Dense(10)(x)

model = Model(inp, out)
model.compile('adam', 'mae')
model.fit(X, y, epochs=100, sample_weight=W)
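If the weights really do need to vary per pixel, as the question asks, rather than per sample, another option is an ordinary loss function that multiplies the element-wise error by a weight map. A minimal sketch, assuming pixel_weights is a constant array broadcastable to the shape of y_true:

import tensorflow as tf

# Sketch: per-pixel weighted absolute error as a plain loss function,
# so gradients flow through it (unlike a tf.keras.metrics.Metric).
def make_weighted_mae(pixel_weights):
    pixel_weights = tf.convert_to_tensor(pixel_weights, dtype=tf.float32)
    def weighted_mae(y_true, y_pred):
        weighted_err = tf.abs(y_true - y_pred) * pixel_weights  # broadcasts over the batch
        return tf.reduce_mean(weighted_err)
    return weighted_mae

# loss_object = make_weighted_mae(pixel_weights)  # then reuse the train_step above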

Different results from Tensorflow and Keras

I get different results from Tensorflow and Keras with the same network structure.
The loss function looks like
class MaskedMultiCrossEntropy(object):
    def loss(self, y_true, y_pred):
        vec = tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true, dim=1)
        mask = tf.equal(y_true[:, 0, :], -1)
        zer = tf.zeros_like(vec)
        loss = tf.where(mask, x=zer, y=vec)
        return loss
The network layer I used is called CrowdsClassification, which is implemented with Keras. Then I build the network by:
x = Dense(128, input_shape=(input_dim,), activation='relu')(inputs)
x = Dropout(0.5)(x)
x = Dense(N_CLASSES)(x)
x = Activation("softmax")(x)
crowd = CrowdsClassification(num_classes, num_oracles, conn_type="MW")
x = crowd(x)
Train the model with Keras
model = Model(inputs=inputs, outputs=x)
model.compile(optimizer='adam', loss=loss)
model.fit(inputs, true_class, epochs=100, shuffle=False, verbose=2, validation_split=0.1)
Train the model with tensorflow
optimizer = tf.train.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
opt_op = optimizer.minimize(loss, global_step=global_step)
sess.run(tf.global_variables_initializer())
for epoch in range(100):
    sess.run([loss, opt_op], feed_dict=train_feed_dict)
The TensorFlow version gets a wrong prediction. It seems that the issue comes from the loss function, as if TensorFlow cannot backpropagate through the masked loss. Can anyone give some advice? Thanks a lot.
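Not a confirmed diagnosis, but one concrete difference between the two setups is the reduction: the loss method returns a per-sample vector, which Keras averages automatically, while optimizer.minimize on a non-scalar tensor effectively sums it, changing the effective step size by a factor of the batch size. A hedged sketch of making the TensorFlow side comparable (y_true_ph and logits stand in for whatever tensors the question's graph actually builds):

import tensorflow as tf  # TF1-style graph code, matching the question's training snippet

per_sample_loss = MaskedMultiCrossEntropy().loss(y_true_ph, logits)
scalar_loss = tf.reduce_mean(per_sample_loss)  # reduce to a scalar, as Keras does
optimizer = tf.train.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
opt_op = optimizer.minimize(scalar_loss, global_step=global_step)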

Keras gives 'Not JSON Serializable' error when saving the model

I'm implementing a fully convolutional neural network for image segmentation, using the U-Net defined here:
https://github.com/zhixuhao
To give different weights to the pixels of different classes, I defined an extra Lambda layer, as suggested here:
Keras, binary segmentation, add weight to loss function
The problem is that Keras raises this error when saving the model
.....
self.model.save(filepath, overwrite=True)
.....
TypeError: ('Not JSON Serializable:', b'\n\x15clip_by_value/Minimum\x12\x07Minimum\x1a\x12conv2d_23/Identity\x1a\x17clip_by_value/Minimum/y*\x07\n\x01T\x12\x020\x01')
My network is defined in an external function
def weighted_binary_loss(X):
    y_pred, y_true, weights = X
    loss = binary_crossentropy(y_true, y_pred)
    weights_mask = y_true * weights[0] + (1. - y_true) * weights[1]
    loss = multiply([loss, weights_mask])
    return loss

def identity_loss(y_true, y_pred):
    return y_pred

def net():
    .....
    ....
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    w_loss = Lambda(weighted_binary_loss, output_shape=input_size, name='loss_output')([conv10, inputs, weights])
    model = Model(inputs=inputs, outputs=w_loss)
    model.compile(optimizer=Adam(lr=1e-5), loss=identity_loss, metrics=['accuracy'])
that I call in my main function
...
model_checkpoint = ModelCheckpoint('temp_model.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(imgs,steps_per_epoch=20,epochs=1,callbacks=[model_checkpoint])
When I remove the Lambda layer, the error disappears:
...
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = Model(inputs = inputs, outputs = conv10)
model.compile(optimizer = Adam(lr = 1e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])
I'm using
Keras==2.2.4, tensorflow-gpu==2.0.0b1
It appears that you are computing the loss in a layer of the model. It is not good practice to implement the loss function as a layer. You can compute your weighted loss using a custom loss function.
So your code can be rewritten as follows:
def weighted_binary_loss(y_true, y_pred):
    weights = [0.5, 0.6]  # Define your weights here
    loss = binary_crossentropy(y_true, y_pred)
    weights_mask = y_true * weights[0] + (1. - y_true) * weights[1]
    loss = multiply([loss, weights_mask])
    return loss

conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=1e-5), loss=weighted_binary_loss, metrics=['accuracy'])
If the weights need to be a dynamic property and you have to pass them as a separate parameter to the loss function, you can follow this question.
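For the static-weights case above, one common way to avoid hard-coding the weights is a closure that builds the loss from them. A minimal sketch reusing the answer's own body (the [0.5, 0.6] values are placeholders, and the imports follow the question's Keras setup):

from keras.losses import binary_crossentropy
from keras.layers import multiply

# Sketch: the weights become an argument of a factory instead of a constant
# inside the loss body.
def make_weighted_binary_loss(weights):
    def weighted_binary_loss(y_true, y_pred):
        loss = binary_crossentropy(y_true, y_pred)
        weights_mask = y_true * weights[0] + (1. - y_true) * weights[1]
        return multiply([loss, weights_mask])
    return weighted_binary_loss

# model.compile(optimizer=Adam(lr=1e-5), loss=make_weighted_binary_loss([0.5, 0.6]), metrics=['accuracy'])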