Conditional subtract in TensorFlow - tensorflow2.0

I'm trying to calculate a custom MSE in tf.keras, such as:
def custom_mse(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)
But I want to calculate the difference y_true - y_pred, except when values of y_true are equal to -99.0.
How could I do this?

What do you want the function to return if y_true == -99.0?
Can you not do this?
def custom_mse(y_true, y_pred):
    # check if the batch of y_true contains -99
    if -99 in y_true:
        # do whatever you like with the batch
        return  # whatever
    return tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)
If you meant to average only the errors where y_true is not -99, I guess you can simply do this:
def custom_mse(y_true, y_pred):
    return tf.reduce_mean(tf.square(y_true - y_pred)[y_true != -99], axis=-1)
Cheers,
Keivan
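For completeness, here is a minimal sketch of the same masking idea written with tf.boolean_mask; the choice to fall back to 0.0 when an entire batch is masked is my own assumption, not part of the answer above:

import tensorflow as tf

def masked_mse(y_true, y_pred):
    # keep only the positions where the label is not the sentinel -99.0
    mask = tf.not_equal(y_true, -99.0)
    squared_error = tf.boolean_mask(tf.square(y_true - y_pred), mask)
    # if every value in the batch is -99.0 there is nothing to average,
    # so return 0.0 instead of NaN (assumed behaviour)
    return tf.cond(tf.size(squared_error) > 0,
                   lambda: tf.reduce_mean(squared_error),
                   lambda: tf.constant(0.0, dtype=y_pred.dtype))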

Another way.
def custom_mse(y_true, y_pred):
    return tf.cond(y_true != -99.0,
                   lambda: tf.reduce_mean(tf.square(y_true - y_pred), axis=-1),
                   lambda: ...)  # add your own logic for the -99.0 case here
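One caveat: tf.cond expects a scalar boolean predicate, while y_true != -99.0 is element-wise, so for a per-element choice tf.where is the more usual tool. A rough sketch (dividing by the number of unmasked entries is my own assumption):

import tensorflow as tf

def custom_mse_where(y_true, y_pred):
    mask = tf.not_equal(y_true, -99.0)
    # zero out the error wherever the label is the sentinel value
    squared_error = tf.where(mask, tf.square(y_true - y_pred), tf.zeros_like(y_pred))
    # average over the unmasked positions only
    n_valid = tf.maximum(tf.reduce_sum(tf.cast(mask, y_pred.dtype)), 1.0)
    return tf.reduce_sum(squared_error) / n_valid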

Related

how to design a custom loss function to add two losses

I am using a CNN to solve a regression problem in a supervised manner. I have input data (X_train) and target data (y_train).
from keras import backend as K

def custom_loss(loss1, loss2):
    def loss(y_true, y_pred):
        return loss1(y_true, y_pred) + loss2(y_true, y_pred)
    return loss
Example usage
from keras.losses import mean_squared_error, mean_absolute_error

model.compile(loss=custom_loss(mean_squared_error, mean_absolute_error), optimizer='adam')
After you declare your model, you can define a custom loss function in the model.compile step. Here's a snippet in case you need to sum MSE and MAE:
from tensorflow import keras
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import MeanSquaredError, MeanAbsoluteError

model = keras.Sequential([
    Dense(32, input_shape=(10,), activation='softmax')
])

def custom_loss_function(y_true, y_pred):
    mse = MeanSquaredError()
    mae = MeanAbsoluteError()
    return mse(y_true, y_pred) + mae(y_true, y_pred)

model.compile(loss=custom_loss_function, optimizer='adam')
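As a quick sanity check (a toy example of mine, not part of either answer), the summed loss can be evaluated on dummy tensors before fitting a model:

import numpy as np
import tensorflow as tf

y_true = tf.constant(np.array([[1.0], [2.0]]), dtype=tf.float32)
y_pred = tf.constant(np.array([[1.5], [1.0]]), dtype=tf.float32)
# MSE = mean(0.5^2, 1.0^2) = 0.625, MAE = mean(0.5, 1.0) = 0.75, sum = 1.375
print(float(custom_loss_function(y_true, y_pred)))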

How to predict a value inside the model loss using the current model weights?

I want to get another predicted value, besides y_true and y_pred, inside the model loss function. I have tried something but it didn't work, so how can I use model.predict inside a loss function in TensorFlow?
class CustomLoss(tf.keras.losses.Loss):
    def __init__(self, anchor, positive, model):
        super().__init__()
        self.anchor = anchor
        self.positive = positive
        self.model = model

    def convert(self, image):
        c_im = self.model(image.reshape(1, 160, 160, 3))
        print(c_im)
        c_im = tf.make_ndarray(c_im)
        return c_im / np.linalg.norm(c_im, ord=2)

    def call(self, y_true, y_pred):
        y_pred_c = tf.cast(y_pred, dtype=tf.float32)
        anchor, positive = self.convert(self.anchor), self.convert(self.positive)
        anchor, positive = tf.convert_to_tensor(anchor, dtype=tf.float32), tf.convert_to_tensor(positive, dtype=tf.float32)
        pos_d = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
        neg_d = tf.reduce_sum(tf.square(tf.subtract(anchor, y_pred_c)), axis=-1)
        basic_l = tf.add(tf.subtract(pos_d, neg_d), 0.2)
        print(basic_l)
        del anchor
        del positive
        return tf.reduce_sum(tf.maximum(basic_l, 0), axis=None)
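No accepted answer is quoted here, but the usual way around this kind of problem is to keep everything as tensors and avoid tf.make_ndarray / NumPy conversions inside the loss. A rough sketch under that assumption (my rewrite, using tf.math.l2_normalize in place of the manual norm; the class and argument names are hypothetical):

import tensorflow as tf

class TripletStyleLoss(tf.keras.losses.Loss):
    def __init__(self, anchor, positive, model, margin=0.2):
        super().__init__()
        self.anchor = anchor      # assumed to be a (1, 160, 160, 3) tensor
        self.positive = positive  # assumed to be a (1, 160, 160, 3) tensor
        self.model = model
        self.margin = margin

    def embed(self, image):
        # call the model directly so tracing and gradients still work
        emb = self.model(image)
        return tf.math.l2_normalize(emb, axis=-1)

    def call(self, y_true, y_pred):
        anchor = self.embed(self.anchor)
        positive = self.embed(self.positive)
        pos_d = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
        neg_d = tf.reduce_sum(tf.square(anchor - tf.cast(y_pred, tf.float32)), axis=-1)
        return tf.reduce_sum(tf.maximum(pos_d - neg_d + self.margin, 0.0))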

how to customize a loss function with an extra variable in keras

I want to train a model with a self-customized loss function. The loss includes two parts, part1 and part2, both of which can be calculated from y_true (the labels) and y_pred (the actual output).
However, the loss is loss = part1 + lambda * part2.
lambda is a variable that should be adjusted together with the parameters of the network model. In TensorFlow, it seems lambda can be defined as a tf.Variable to be updated. However, how can I do it in Keras?
Alright, I have come up with a solution. It is ugly, but it's a solution:
from keras.layers import Layer, Input, Dense
from keras.models import Model
from keras.losses import mean_squared_error as MSE
import numpy as np

class UtilityLayer(Layer):
    def build(self, input_shape):
        self.kernel = self.add_weight(
            name='kernel',
            shape=(1,),
            initializer='ones',
            trainable=True,
            constraint='nonneg'
        )
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        return self.kernel
switch = -1
last_loss = 0

def custom_loss_builder(utility_layer):
    def custom_loss(y_true, y_pred):
        global switch, last_loss
        switch *= -1
        if switch == 1:
            last_loss = utility_layer.trainable_weights[0] * MSE(y_true, y_pred)
            return last_loss  # your network loss
        else:
            return last_loss  # your lambda loss
    return custom_loss
# x and y are the training inputs/targets; dummy_y is a placeholder target
# for the utility output so Keras has something to pass to its loss
dummy_y = np.empty(len(x))

inputs = Input(shape=(1,))
hidden = Dense(2, activation='relu')(inputs)  # renamed so it does not shadow the training data x
outputs = Dense(1)(hidden)
utility_outputs = UtilityLayer()(inputs)

model = Model(inputs, [outputs, utility_outputs])
model.compile(optimizer='adam', loss=custom_loss_builder(model.layers[-1]))
model.fit(x, [y, dummy_y], epochs=100)
And the evolution of your lambda: (plot of the learned lambda weight over training epochs, not reproduced here)
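To watch that evolution yourself, one option (my own sketch, assuming model.layers[-1] is the UtilityLayer as in the compile call above) is to read the weight back after each epoch with a small callback:

from keras.callbacks import Callback

class LambdaLogger(Callback):
    # hypothetical helper, not part of the original answer
    def on_epoch_end(self, epoch, logs=None):
        lam = self.model.layers[-1].get_weights()[0][0]
        print(f'epoch {epoch}: lambda = {lam:.4f}')

model.fit(x, [y, dummy_y], epochs=100, callbacks=[LambdaLogger()])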

How to define a ReLU with TensorFlow custom_gradient?

I'm practicing using TensorFlow's custom_gradient decorator and I tried to define a simple ReLU. One would think it would be as simple as defining the gradient to be 1 when x > 0 and 0 otherwise. However, the following code does not yield the same gradients as a ReLU:
@tf.custom_gradient
def relu(x):
    def grad(dy):
        return tf.cond(tf.reshape(x, []) > 0,
                       lambda: tf.cast(tf.reshape(1, dy.shape), tf.float32),
                       lambda: tf.cast(tf.reshape(0, dy.shape), tf.float32))
    return tf.nn.relu(x), grad
Can someone explain to me why this standard definition of ReLU's gradient does not yield the same performance as:
@tf.custom_gradient
def relu(x):
    def grad(dy):
        return dy
    return tf.nn.relu(x), grad
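No answer is reproduced here, but the usual element-wise formulation multiplies the upstream gradient dy by a mask over x instead of replacing it with a reshaped constant through tf.cond (which in any case only accepts a scalar predicate). A sketch of that version:

import tensorflow as tf

@tf.custom_gradient
def relu(x):
    def grad(dy):
        # pass the upstream gradient through where x > 0, zero it elsewhere
        return dy * tf.cast(x > 0, dy.dtype)
    return tf.nn.relu(x), grad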

Why use axis=-1 in Keras metrics function?

Keras version: 2.0.8
Some Keras metric functions and loss functions use axis=-1 as a parameter.
For example:
def binary_accuracy(y_true, y_pred):
    return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
In my case:
shape of y_true:(4,256,256,2)
shape of y_pred:(4,256,256,2)
So, binary_accuracy(y_true, y_pred) should return a tensor with shape=(4,256,256) instead of a scalar tensor.
But when using binary_accuracy as the metric function:
model.compile(optimizer=adam, loss=keras.losses.binary_crossentropy, metrics=[binary_accuracy])
the log still prints binary_accuracy as a scalar, which confuses me a lot.
Does Keras do something special with the return value of the binary_accuracy function?
Epoch 11/300
0s - loss: 0.4158 - binary_accuracy: 0.9308 - val_loss: 0.4671 -
val_binary_accuracy: 0.7767
Here's what you're looking for, inside training_utils.py:
def weighted(y_true, y_pred, weights, mask=None):
    """Wrapper function.

    # Arguments
        y_true: `y_true` argument of `fn`.
        y_pred: `y_pred` argument of `fn`.
        weights: Weights tensor.
        mask: Mask tensor.

    # Returns
        Scalar tensor.
    """
    # score_array has ndim >= 2
    score_array = fn(y_true, y_pred)
    if mask is not None:
        # Cast the mask to floatX to avoid float64 upcasting in Theano
        mask = K.cast(mask, K.floatx())
        # mask should have the same shape as score_array
        score_array *= mask
        # the loss per batch should be proportional
        # to the number of unmasked samples.
        score_array /= K.mean(mask) + K.epsilon()
    # apply sample weighting
    if weights is not None:
        # reduce score_array to same ndim as weight array
        ndim = K.ndim(score_array)
        weight_ndim = K.ndim(weights)
        score_array = K.mean(score_array,
                             axis=list(range(weight_ndim, ndim)))
        score_array *= weights
        score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
    return K.mean(score_array)

return weighted
The metric function is called by score_array = fn(y_true, y_pred) (it's a nested function and fn is defined in the outer function). This array is averaged in the last line return K.mean(score_array). That's why you're seeing scalar metrics instead of tensors. The lines in between are just to introduce masks and weights if necessary.
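As a quick illustration (a toy example of mine, not part of the answer), the per-sample metric really does have shape (4, 256, 256) before that final K.mean collapses it to the scalar you see in the training log:

import numpy as np
import tensorflow as tf

y_true = tf.constant(np.random.randint(0, 2, size=(4, 256, 256, 2)), dtype=tf.float32)
y_pred = tf.constant(np.random.rand(4, 256, 256, 2), dtype=tf.float32)

# same computation as binary_accuracy, written with explicit tf ops
per_sample = tf.reduce_mean(tf.cast(tf.equal(y_true, tf.round(y_pred)), tf.float32), axis=-1)
print(per_sample.shape)                   # (4, 256, 256)
print(float(tf.reduce_mean(per_sample)))  # the scalar reported in the log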