Converting from PyTorch to TensorFlow for a Self-Attention Pooling Layer

I have found a PyTorch implementation of the said layer from the paper "Self-Attention Encoding and Pooling for Speaker Recognition", available here. However, due to CUDA compatibility issues, I can't use that code. Also, all my code so far has been implemented in TensorFlow, so I want to do a one-to-one translation/conversion from PyTorch to TensorFlow.
First of all, this is the code in PyTorch:
class SelfAttentionPooling(nn.Module):
    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension
        attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep: size (N, H)
        """
        softmax = nn.functional.softmax
        att_w = softmax(self.W(batch_rep).squeeze(-1)).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep
And this is my translation of the code snippet to TensorFlow:
class Self_Attention_Pooling(keras.layers.Layer):
    def __init__(self, input_dim):
        super(Self_Attention_Pooling, self).__init__()
        self.W = Dense(input_dim)

    def forward(self, batch_rep):
        softmax = Softmax()
        att_w = self.W(batch_rep)
        att_w = softmax(att_w)

        # Not so sure about these two lines though.
        #x = np.expand(batch_rep)
        #att_w = softmax(self.W(x))

        utter_rep = np.sum(batch_rep * att_w, axis=1)
        return utter_rep
Is my implementation/translation/conversion from PyTorch to Tensorflow correct? If not, please edit and help me.
Thank you very much.

Two remarks regarding your implementation:
For custom layers in TF, you should implement the call method instead of the forward method (cf. Implementing custom layers).
For the operations, you should replace the NumPy functions with TensorFlow functions to enable GPU support.
Here is the code I am using in TF for the SelfAttentionPooling:
import tensorflow as tf
class SelfAttentionPooling(tf.keras.layers.Layer):
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(units=1, use_bias=False)

    def call(self, x: tf.Tensor) -> tf.Tensor:
        """Apply the self attention pooling on input tensor.

        Args:
            x: input tensor (?, seq_len, emb_dim)

        Returns:
            (?, emb_dim)
        """
        # (?, seq_len)
        attention_weights = tf.nn.softmax(tf.squeeze(self.dense(x)))
        # (?, emb_dim)
        pooled = tf.reduce_sum(tf.expand_dims(attention_weights, axis=-1) * x, axis=1)
        return pooled
You can quickly check it gives the expected output:
self_attn_pooling = SelfAttentionPooling()
# (?, seq_len, emb_dim)
input_shape = 4, 9, 128
x = tf.random.normal(input_shape)
pooled = self_attn_pooling(x)
# (?, emb_dim)
assert pooled.shape == (4, 128)
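One small caveat (my own note, not from the original answer): tf.squeeze with no axis argument also drops a batch dimension of size 1, so it may be safer to squeeze only the last axis inside call, e.g.:

# hypothetical tweak to the call method above: squeeze only the last axis
attention_weights = tf.nn.softmax(tf.squeeze(self.dense(x), axis=-1))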

Related

tf.reshape(self.normalized_price(prce), (-1, 1)), ValueError: Shape must be rank 1 but is rank 2

I am getting the following error when I am calling the subclass of the model. My guess is that I am not passing the two parameters correctly or the reshape is not outputting the correct value.
ValueError: Shape must be rank 1 but is rank 2 for '{{node base_stock_model/concat}} = ConcatV2[N=3, T=DT_FLOAT, Tidx=DT_INT32](base_stock_model/sequential_2/embedding_2/embedding_lookup/Identity_1, base_stock_model/sequential_3/embedding_3/embedding_lookup/Identity_1, base_stock_model/Reshape, base_stock_model/concat/axis)' with input shapes: [32], [32], [1,1], [].
Here is the main model class:
class StockModel(tfrs.models.Model):
    def __init__(self, rating_weight: float, retrieval_weight: float) -> None:
        super().__init__()
        embedding_dimension = 32
        self.user_model = UserModel()
        self.stock_model = base_stockModel()
        self.rating_model = tf.keras.Sequential([
            tf.keras.layers.Dense(256, activation="relu"),
            tf.keras.layers.Dense(128, activation="relu"),
            tf.keras.layers.Dense(1),
        ])
        # The tasks.
        self.rating_task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
            loss=tf.keras.losses.MeanSquaredError(),
            metrics=[tf.keras.metrics.RootMeanSquaredError()],
        )
        self.retrieval_task: tf.keras.layers.Layer = tfrs.tasks.Retrieval(
            metrics=tfrs.metrics.FactorizedTopK(
                candidates=stocks.batch(1).map(self.stock_model)
            )
        )
        # The loss weights.
        self.rating_weight = rating_weight
        self.retrieval_weight = retrieval_weight

    def call(self, features: Dict[Text, tf.Tensor]) -> tf.Tensor:
        user_embeddings = self.user_model(features['username'])
        # np.array([features["name"],features["price"]])
        price = tf.as_string(features["price"])
        stock_embeddings = self.stock_model([features["name"], price])
        return (
            user_embeddings,
            stock_embeddings,
            self.rating_model(
                tf.concat([user_embeddings, stock_embeddings], axis=1)
            ),
        )

    def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
        ratings = features.pop("Rating")
        print("features", features)
        user_embeddings, stock_embeddings, rating_predictions = self(features)
        # We compute the loss for each task.
        rating_loss = self.rating_task(
            labels=ratings,
            predictions=rating_predictions,
        )
        retrieval_loss = self.retrieval_task(user_embeddings, stock_embeddings)
        # And combine them using the loss weights.
        return (self.rating_weight * rating_loss
                + self.retrieval_weight * retrieval_loss)
The above main model class calls the base_stockModel class, which is where the error occurs.
class base_stockModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        embedding_dimension = 32
        self.stock_embedding = tf.keras.Sequential([
            tf.keras.layers.StringLookup(
                vocabulary=unique_stock_titles, mask_token=None),
            tf.keras.layers.Embedding(len(unique_stock_titles) + 1, embedding_dimension)
        ])
        self.price_embedding = tf.keras.Sequential([
            tf.keras.layers.Discretization(prices_bucket.tolist()),
            tf.keras.layers.Embedding(len(prices_bucket) + 2, 32)
        ])
        self.normalized_price = tf.keras.layers.Normalization(axis=None)
        self.normalized_price.adapt(prices)

    def call(self, input, *args, **kwargs):
        print(input.get_shape(), kwargs)
        # print(tf.rank(input),[input[:]],input.get_shape(),input.dtype)
        # nme=input[3]
        nme = input[0]
        prce = input[1]
        prce = tf.strings.to_number(input[1], out_type=tf.dtypes.float32)
        # print(tf.rank(self.stock_embedding(nme)),tf.rank(self.price_embedding(prce)),tf.rank(tf.reshape(self.normalized_price(prce), (-1, 1))))
        return tf.concat([
            self.stock_embedding(nme),
            self.price_embedding(prce),
            tf.reshape(self.normalized_price(prce), (-1, 1)),
        ], axis=1)
This code is a variant of the official TensorFlow Recommenders examples:
https://www.tensorflow.org/recommenders/examples/context_features
Any help is much appreciated.
First, analyze the ranks of all the input tensors. If a tensor does not have the rank the model expects, use tf.reshape() or adjust the input to match what the model demands. Note that tf.shape() gives you the shape of a tensor while the model is running.
Here is the documentation:
https://www.tensorflow.org/api_docs/python/tf/reshape
https://www.tensorflow.org/api_docs/python/tf/shape
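To illustrate that advice (a sketch with made-up shapes, mirroring the ranks in the error message rather than the asker's actual data): inspect the rank of each piece, then reshape everything to a common rank before tf.concat.

import tensorflow as tf

# Hypothetical tensors: two rank-1 tensors and one rank-2 tensor, as in the error
a = tf.random.normal([32])      # rank 1
b = tf.random.normal([32])      # rank 1
c = tf.random.normal([1, 1])    # rank 2

print(tf.rank(a), tf.shape(a))  # check rank/shape at run time

# tf.concat requires all inputs to have the same rank, so reshape first,
# e.g. give every tensor a leading batch dimension of 1:
a2 = tf.reshape(a, (1, -1))     # (1, 32)
b2 = tf.reshape(b, (1, -1))     # (1, 32)
merged = tf.concat([a2, b2, c], axis=1)   # (1, 65)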

GCN model is not learning

I am trying to implement a GCN layer using TensorFlow, but it is not learning. Can someone check what the potential issue could be?
I have tried normalizing the adjacency matrix and even replaced it with the identity so that the GCN layer becomes a simple MLP, but there is no change. I think I have made some fundamental/silly mistake in my implementation which I am not able to find. Can someone let me know what the issue could be?
!pip install numpy
!pip install tensorflow
!pip install spektral
#!pip install tqdm

import numpy as np
import tensorflow as tf
import spektral
def masked_cross_entropy_loss(labels, logits, mask):
    loss = tf.nn.softmax_cross_entropy_with_logits(labels, logits)
    mask = tf.cast(mask, dtype=tf.float32)
    # the step below is important, because we need to find the mean of only the masked nodes
    # dividing the mask by its mean = mask * total_nodes / total_masked_nodes, which comes in handy when we take the mean of the loss in the final step
    # the total number of nodes that are input to the function is cancelled out between the two means of mask and loss
    # what remains is only the total number of masked nodes in the denominator
    mask /= tf.reduce_mean(mask)
    loss *= mask
    return tf.reduce_mean(loss)

def masked_accuracy(labels, logits, mask):
    accuracy_array = tf.equal(tf.argmax(logits, axis=1), tf.argmax(labels, axis=1))
    accuracy_array = tf.cast(accuracy_array, dtype=tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)
    accuracy_array *= mask
    return tf.reduce_mean(accuracy_array)

class GCNLayer:
    def __init__(self, A):
        self.A = A

    def _transform(self, units, features, trans_func):
        if trans_func == 'dense':
            features = tf.keras.layers.Dense(units)(features)
            features = tf.cast(features, dtype=tf.float32)
            return features
        else:
            raise Exception('Transformation function not implemented')

    def _aggregate(self, features, agg_func):
        if agg_func == 'adj_matmul':
            return self.A @ features
        else:
            raise Exception('Aggregation function not implemented')

    def _activate(self, features, activation):
        features = tf.keras.layers.Activation(activation)(features)
        return features

    def __call__(self, units, features, trans_func='dense', agg_func='adj_matmul', activation='relu'):
        features = self._transform(units, features, trans_func)
        features = self._aggregate(features, agg_func)
        if activation is not None:
            features = self._activate(features, activation)
        return features

class MyModel:
    def __init__(self, A, node_features, node_labels, train_mask, val_mask, test_mask):
        self.A = A
        self.node_features = node_features
        self.node_labels = node_labels
        self.train_mask = train_mask
        self.val_mask = val_mask
        self.test_mask = test_mask
        self.gcn_layer1 = GCNLayer(self.A)
        self.gcn_layer2 = GCNLayer(self.A)

    def __call__(self):
        hidden_out = self.gcn_layer1(32, self.node_features, activation='relu')
        output = self.gcn_layer2(7, hidden_out, activation=None)
        return output

    def train(self, num_epochs=1, lr=0.01):
        optimizer = tf.keras.optimizers.Adam(lr)
        best_val_acc = 0.0
        for e in range(num_epochs):
            with tf.GradientTape() as t:
                logits = self()
                train_loss = masked_cross_entropy_loss(self.node_labels, logits, self.train_mask)

            variables = t.watched_variables()
            grads = t.gradient(train_loss, variables)
            optimizer.apply_gradients(zip(grads, variables))

            logits = self()
            train_acc = masked_accuracy(self.node_labels, logits, self.train_mask)
            val_acc = masked_accuracy(self.node_labels, logits, self.val_mask)
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                print(f'epoch={e},Training Loss:{train_loss.numpy()},Training Accuracy:{train_acc.numpy()}, Validation Accuracy:{val_acc.numpy()}')

model = MyModel(A, node_features, node_labels, train_mask, val_mask, test_mask)
model.train(num_epochs=200, lr=0.01)
Output
epoch=0,Training Loss:4.099794864654541,Training Accuracy:0.1428571492433548, Validation Accuracy:0.09000000357627869
epoch=1,Training Loss:6.438627243041992,Training Accuracy:0.20714285969734192, Validation Accuracy:0.16599997878074646
epoch=5,Training Loss:5.980966091156006,Training Accuracy:0.17142857611179352, Validation Accuracy:0.17399999499320984
epoch=13,Training Loss:3.9486303329467773,Training Accuracy:0.15000000596046448, Validation Accuracy:0.2800000011920929
epoch=40,Training Loss:5.182331562042236,Training Accuracy:0.23571430146694183, Validation Accuracy:0.29600000381469727
epoch=158,Training Loss:6.245728969573975,Training Accuracy:0.2142857164144516, Validation Accuracy:0.3160000145435333
Your model is learning but doesn't converge. Consider checking/adding data, using a simpler model, or tuning parameters while training (e.g. learning rate, batch size).
I found the problem in my code. I had instantiated the tf.keras.layers.Dense layer in the call function, which caused the weights to be re-initialized on every epoch, confusing the GradientTape.
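A minimal sketch of that fix (my own illustration based on the GCNLayer from the question, not the asker's final code): create the Dense and Activation layers once in __init__ so their weights persist across epochs, instead of re-creating them in every call.

class GCNLayer(tf.keras.layers.Layer):
    def __init__(self, A, units, activation=None):
        super().__init__()
        self.A = A
        # created once, so the kernel is reused (and trained) across epochs
        self.dense = tf.keras.layers.Dense(units)
        self.activation = tf.keras.layers.Activation(activation) if activation else None

    def call(self, features):
        features = self.dense(features)   # transform
        features = self.A @ features      # aggregate with the adjacency matrix
        if self.activation is not None:
            features = self.activation(features)
        return features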

Unable to understand the behavior of method `build` in tensorflow keras layers (tf.keras.layers.Layer)

Layers in TensorFlow Keras have a build method that is used to defer weight creation until the input shape is known (see a layer's build method).
I have a few questions I have not been able to find the answer to:
Here it is said that
If you assign a Layer instance as attribute of another Layer, the outer layer will start tracking the weights of the inner layer.
What does it mean to track the weights of a layer?
The same link also mentions that
We recommend creating such sublayers in the init method (since the sublayers will typically have a build method, they will be built when the outer layer gets built).
Does it mean that while running the build method of the child class (self), there will be an iteration through all the attributes of self, and whichever are found to be instances of tf.keras.layers.Layer will have their build methods run automatically?
I can run this code:
class Net(tf.keras.Model):
    """A simple linear model."""

    def __init__(self):
        super(Net, self).__init__()
        self.l1 = tf.keras.layers.Dense(5)

    def call(self, x):
        return self.l1(x)

net = Net()
print(net.variables)
But not this:
class Net(tf.keras.Model):
    """A simple linear model."""

    def __init__(self):
        super(Net, self).__init__()
        self.l1 = tf.keras.layers.Dense(5)

    def build(self, input_shape):
        super().build()

    def call(self, x):
        return self.l1(x)

net = Net()
print(net.variables)
why?
I would say the build mentioned here means: when you build a self-defined tf.keras.Model, for example
net = Net()
then you get all the tf.keras.layers.Layer objects created in __init__ and stored in net, which is a callable object. In this case it becomes a complete object for TF to train later; this is what is meant by tracking. The next time you call net(inputs) you will get your outputs.
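A small check of that behaviour (my own sketch, assuming TF 2.x eager execution and the Net class from the question): the inner Dense layer's weights only appear once the model has been called on a concrete input.

import tensorflow as tf

net = Net()                     # Net as defined in the question
print(len(net.variables))       # 0 - the Dense layer has not been built yet

_ = net(tf.zeros((1, 3)))       # first call triggers build with input shape (1, 3)
print([v.shape for v in net.variables])   # kernel (3, 5) and bias (5,) are now tracked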
Here is an example of a TensorFlow self-defined decoder with attention:
class BahdanauAttention(tf.keras.layers.Layer):
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, query, values):
        # query hidden state shape == (batch_size, hidden size)
        # query_with_time_axis shape == (batch_size, 1, hidden size)
        # values shape == (batch_size, max_len, hidden size)
        # we are doing this to broadcast addition along the time axis to calculate the score
        query_with_time_axis = tf.expand_dims(query, 1)

        # score shape == (batch_size, max_length, 1)
        # we get 1 at the last axis because we are applying score to self.V
        # the shape of the tensor before applying self.V is (batch_size, max_length, units)
        score = self.V(tf.nn.tanh(
            self.W1(query_with_time_axis) + self.W2(values)))

        # attention_weights shape == (batch_size, max_length, 1)
        attention_weights = tf.nn.softmax(score, axis=1)

        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)

        return context_vector, attention_weights

class Decoder(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.dec_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc = tf.keras.layers.Dense(vocab_size)

        # used for attention
        self.attention = BahdanauAttention(self.dec_units)

    def call(self, x, hidden, enc_output):
        # enc_output shape == (batch_size, max_length, hidden_size)
        context_vector, attention_weights = self.attention(hidden, enc_output)

        # x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)

        # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)

        # passing the concatenated vector to the GRU
        output, state = self.gru(x)

        # output shape == (batch_size * 1, hidden_size)
        output = tf.reshape(output, (-1, output.shape[2]))

        # output shape == (batch_size, vocab)
        x = self.fc(output)

        return x, state, attention_weights
I have tried putting tf.keras.layers.Layer objects in call and got a really poor outcome; I guess that is because if you create them in call they will be re-created every time a forward-backward propagation happens.
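As a small illustration of what build is for (my own sketch, not part of the answer above): a layer can create its weights in build, using the input shape it only sees at the first call.

class MyDense(tf.keras.layers.Layer):
    """Minimal Dense-like layer that defers weight creation to build()."""

    def __init__(self, units):
        super().__init__()
        self.units = units

    def build(self, input_shape):
        # input_shape is only known here, at the first call
        self.kernel = self.add_weight(name="kernel",
                                      shape=(input_shape[-1], self.units))
        self.bias = self.add_weight(name="bias",
                                    shape=(self.units,),
                                    initializer="zeros")

    def call(self, x):
        return tf.matmul(x, self.kernel) + self.bias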

Custom Attention Layer in Keras

I want to create a custom attention layer that, for the input at any time step, returns the weighted mean of the inputs at all time steps.
For example, I want an input tensor with shape [32, 100, 2048] to go into the layer and to get back a tensor with shape [32, 100, 2048]. I wrote the layer as follows:
import tensorflow as tf
from keras.layers import Layer, Dense
# or
from tensorflow.keras.layers import Layer, Dense

class Attention(Layer):
    def __init__(self, units_att):
        self.units_att = units_att
        self.W = Dense(units_att)
        self.V = Dense(1)
        super().__init__()

    def __call__(self, values):
        t = tf.constant(0, dtype=tf.int32)
        time_steps = tf.shape(values)[1]
        initial_outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
        initial_att = tf.TensorArray(dtype=tf.float32, size=time_steps)

        def should_continue(t, *args):
            return t < time_steps

        def iteration(t, values, outputs, atts):
            score = self.V(tf.nn.tanh(self.W(values)))

            # attention_weights shape == (batch_size, time_step, 1)
            attention_weights = tf.nn.softmax(score, axis=1)

            # context_vector shape after sum == (batch_size, hidden_size)
            context_vector = attention_weights * values
            context_vector = tf.reduce_sum(context_vector, axis=1)

            outputs = outputs.write(t, context_vector)
            atts = atts.write(t, attention_weights)
            return t + 1, values, outputs, atts

        t, values, outputs, atts = tf.while_loop(should_continue, iteration,
                                                 [t, values, initial_outputs, initial_att])

        outputs = outputs.stack()
        outputs = tf.transpose(outputs, [1, 0, 2])

        atts = atts.stack()
        atts = tf.squeeze(atts, -1)
        atts = tf.transpose(atts, [1, 0, 2])
        return t, values, outputs, atts
For input = tf.constant(2, shape=[32, 100, 2048], dtype=tf.float32) I get the output with shape [32, 100, 2048] in tf2 and [32, None, 2048] in tf1.
For input = Input(shape=(None, 2048)) I get the output with shape [None, None, 2048] in tf1, and in tf2 I get the error
TypeError: 'Tensor' object cannot be interpreted as an integer
Finally, in both cases I can't use this layer in my model, because my model input is Input(shape=(None, 2048)) and I get the error
AttributeError: 'NoneType' object has no attribute '_inbound_nodes'
in tf1, and in tf2 I get the same error as said above. I create my model with the Keras functional method.
From the code you have shared, it looks like you want to implement Bahdanau's attention layer. You want to attend to all the 'values' (the previous layer's output - all its hidden states), and your 'query' would be the last hidden state of the decoder. Your code should actually be very simple and should look like:
class Bahdanau(tf.keras.layers.Layer):
    def __init__(self, n):
        super(Bahdanau, self).__init__()
        self.w = tf.keras.layers.Dense(n)
        self.u = tf.keras.layers.Dense(n)
        self.v = tf.keras.layers.Dense(1)

    def call(self, query, values):
        query = tf.expand_dims(query, 1)
        e = self.v(tf.nn.tanh(self.w(query) + self.u(values)))
        a = tf.nn.softmax(e, axis=1)
        c = a * values
        c = tf.reduce_sum(c, axis=1)
        return a, c

## Say we want 10 units in the single-layer MLP determining w, u
attentionlayer = Bahdanau(10)

## Call with i/p: decoder state @ t-1 and all encoder hidden states
a, c = attentionlayer(stminus1, hj)
We are not specifying the tensor shape anywhere in the code. This code will return a context tensor of the same size as 'stminus1', which is the 'query'. It does this after attending to all the 'values' (all the encoder's output states) using Bahdanau's attention mechanism.
So assuming your batch size is 32, timesteps=100 and embedding dimension=2048, the shape of stminus1 should be (32,2048) and the shape of the hj should be (32,100,2048). The shape of the output context would be (32,2048). We also returned the 100 attention weights just in case you want to route them to a nice display.
This is the simplest version of 'Attention'. If you have any other intent, please let me know and I will reformat my answer. For more specific details, please refer https://towardsdatascience.com/create-your-own-custom-attention-layer-understand-all-flavours-2201b5e8be9e
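To make the shapes from the answer concrete, a quick check might look like this (a sketch using random tensors in place of the real stminus1 and hj):

import tensorflow as tf

attentionlayer = Bahdanau(10)
stminus1 = tf.random.normal((32, 2048))     # decoder state at t-1
hj = tf.random.normal((32, 100, 2048))      # all encoder hidden states

a, c = attentionlayer(stminus1, hj)
print(a.shape)   # (32, 100, 1) - one attention weight per time step
print(c.shape)   # (32, 2048)   - context vector, same size as the query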

Create keras callback to save model predictions and targets for each batch during training

I am building a simple Sequential model in Keras (tensorflow backend). During training I want to inspect the individual training batches and model predictions. Therefore, I am trying to create a custom Callback that saves the model predictions and targets for each training batch. However, the model is not using the current batch for prediction, but the entire training data.
How can I hand over only the current training batch to the Callback?
And how can I access the batches and targets that the Callback saves in self.predhis and self.targets?
My current version looks as follows:
callback_list = [prediction_history((self.x_train, self.y_train))]
self.model.fit(self.x_train, self.y_train, batch_size=self.batch_size, epochs=self.n_epochs,
               validation_data=(self.x_val, self.y_val), callbacks=callback_list)

class prediction_history(keras.callbacks.Callback):
    def __init__(self, train_data):
        self.train_data = train_data
        self.predhis = []
        self.targets = []

    def on_batch_end(self, epoch, logs={}):
        x_train, y_train = self.train_data
        self.targets.append(y_train)
        prediction = self.model.predict(x_train)
        self.predhis.append(prediction)
        tf.logging.info("Prediction shape: {}".format(prediction.shape))
        tf.logging.info("Targets shape: {}".format(y_train.shape))
NOTE: this answer is outdated and only works with TF1. Check @bers's answer for a solution tested on TF2.
After model compilation, the placeholder tensor for y_true is in model.targets and y_pred is in model.outputs.
To save the values of these placeholders at each batch, you can:
First copy the values of these tensors into variables.
Evaluate these variables in on_batch_end, and store the resulting arrays.
Now step 1 is a bit involved, because you'll have to add a tf.assign op to the training function model.train_function. Using the current Keras API, this can be done by providing a fetches argument to K.function() when the training function is constructed.
In model._make_train_function(), there's a line:
self.train_function = K.function(inputs,
                                 [self.total_loss] + self.metrics_tensors,
                                 updates=updates,
                                 name='train_function',
                                 **self._function_kwargs)
The fetches argument containing the tf.assign ops can be provided via model._function_kwargs (only works after Keras 2.1.0).
As an example:
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import Callback
from keras import backend as K
import tensorflow as tf
import numpy as np

class CollectOutputAndTarget(Callback):
    def __init__(self):
        super(CollectOutputAndTarget, self).__init__()
        self.targets = []  # collect y_true batches
        self.outputs = []  # collect y_pred batches

        # the shape of these 2 variables will change according to batch shape
        # to handle the "last batch", specify `validate_shape=False`
        self.var_y_true = tf.Variable(0., validate_shape=False)
        self.var_y_pred = tf.Variable(0., validate_shape=False)

    def on_batch_end(self, batch, logs=None):
        # evaluate the variables and save them into lists
        self.targets.append(K.eval(self.var_y_true))
        self.outputs.append(K.eval(self.var_y_pred))

# build a simple model
# have to compile first for model.targets and model.outputs to be prepared
model = Sequential([Dense(5, input_shape=(10,))])
model.compile(loss='mse', optimizer='adam')

# initialize the variables and the `tf.assign` ops
cbk = CollectOutputAndTarget()
fetches = [tf.assign(cbk.var_y_true, model.targets[0], validate_shape=False),
           tf.assign(cbk.var_y_pred, model.outputs[0], validate_shape=False)]
model._function_kwargs = {'fetches': fetches}  # use `model._function_kwargs` if using `Model` instead of `Sequential`

# fit the model and check results
X = np.random.rand(10, 10)
Y = np.random.rand(10, 5)
model.fit(X, Y, batch_size=8, callbacks=[cbk])
Unless the number of samples is divisible by the batch size, the final batch will have a different size than the other batches. So K.variable() and K.update() can't be used in this case; you'll have to use tf.Variable(..., validate_shape=False) and tf.assign(..., validate_shape=False) instead.
To verify the correctness of the saved arrays, you can add one line in training.py to print out the shuffled index array:
if shuffle == 'batch':
    index_array = _batch_shuffle(index_array, batch_size)
elif shuffle:
    np.random.shuffle(index_array)

print('Index array:', repr(index_array))  # Add this line

batches = _make_batches(num_train_samples, batch_size)
The shuffled index array should be printed out during fitting:
Epoch 1/1
Index array: array([8, 9, 3, 5, 4, 7, 1, 0, 6, 2])
10/10 [==============================] - 0s 23ms/step - loss: 0.5670
And you can check if cbk.targets is the same as Y[index_array]:
index_array = np.array([8, 9, 3, 5, 4, 7, 1, 0, 6, 2])
print(Y[index_array])
[[ 0.75325592 0.64857277 0.1926653 0.7642865 0.38901153]
[ 0.77567689 0.13573623 0.4902501 0.42897559 0.55825652]
[ 0.33760938 0.68195038 0.12303088 0.83509441 0.20991668]
[ 0.98367778 0.61325065 0.28973401 0.28734073 0.93399794]
[ 0.26097574 0.88219054 0.87951941 0.64887846 0.41996446]
[ 0.97794604 0.91307569 0.93816428 0.2125808 0.94381495]
[ 0.74813435 0.08036688 0.38094272 0.83178364 0.16713736]
[ 0.52609421 0.39218962 0.21022047 0.58569125 0.08012982]
[ 0.61276627 0.20679494 0.24124858 0.01262245 0.0994412 ]
[ 0.6026137 0.25620512 0.7398164 0.52558182 0.09955769]]
print(cbk.targets)
[array([[ 0.7532559 , 0.64857274, 0.19266529, 0.76428652, 0.38901153],
[ 0.77567691, 0.13573623, 0.49025011, 0.42897558, 0.55825651],
[ 0.33760938, 0.68195039, 0.12303089, 0.83509439, 0.20991668],
[ 0.9836778 , 0.61325067, 0.28973401, 0.28734073, 0.93399793],
[ 0.26097575, 0.88219053, 0.8795194 , 0.64887846, 0.41996446],
[ 0.97794604, 0.91307569, 0.93816429, 0.2125808 , 0.94381493],
[ 0.74813437, 0.08036689, 0.38094273, 0.83178365, 0.16713737],
[ 0.5260942 , 0.39218962, 0.21022047, 0.58569127, 0.08012982]], dtype=float32),
array([[ 0.61276627, 0.20679495, 0.24124858, 0.01262245, 0.0994412 ],
[ 0.60261369, 0.25620511, 0.73981643, 0.52558184, 0.09955769]], dtype=float32)]
As you can see, there are two batches in cbk.targets (one "full batch" of size 8 and the final batch of size 2), and the row order is the same as Y[index_array].
Long edit (almost a new answer) for the following reasons:
Yu-Yang's 2017 answer relies on the private _make_train_function and _function_kwargs APIs, which work only in TF1 (and maybe in TF1 compatibility, so-called non-eager mode).
Similarly, Binyan Hu's 2020 answer relies on _make_test_function and does not work in TF2 by default (requiring non-eager mode as well).
My own Jan 2020 answer, which was already subject to several required configuration settings, seems to have stopped working with (or before) TF 2.5, and I was not able to make model.inputs or model.outputs work any longer.
Finally, the earlier version of this answer requires potentially expensive model evaluation to obtain the predictions for each batch. A similar solution to obtain activation histograms even led to OOM issues with repeated training of different models.
So I set out to find a way to obtain all possible quantities (inputs, targets, predictions, activations), batch-wise, without using any private APIs. The aim was to be able to call .numpy() on the intended quantities, so Keras callbacks can run ordinary Python code to ease debugging (I suppose that is what this question is mainly about - for maximum performance, one would probably try to integrate as many computations as possible into TensorFlow's graph operations anyway).
This is the common base model for all solutions:
"""Demonstrate batch data access."""
import tensorflow as tf
from tensorflow import keras
class DataCallback(keras.callbacks.Callback):
"""This class is where all implementations differ."""
def tf_nan(dtype):
"""Create NaN variable of proper dtype and variable shape for assign()."""
return tf.Variable(float("nan"), dtype=dtype, shape=tf.TensorShape(None))
def main():
"""Run main."""
model = keras.Sequential([keras.layers.Dense(1, input_shape=(2,))])
callback = DataCallback()
model.compile(loss="mse", optimizer="adam")
model.fit(
x=tf.transpose(tf.range(7.0) + [[0.2], [0.4]]),
y=tf.transpose(tf.range(7.0) + 10 + [[0.5]]),
validation_data=(
tf.transpose(tf.range(11.0) + 30 + [[0.6], [0.7]]),
tf.transpose(tf.range(11.0) + 40 + [[0.9]]),
),
shuffle=False,
batch_size=3,
epochs=2,
verbose=0,
callbacks=[callback],
)
model.save("tmp.tf")
if __name__ == "__main__":
main()
The following three snippets show one possible solution each, each with their own pros and cons. The core trick is always the same: allocate a tf.Variable and use tf.Variable.assign to export the intended quantity, from some Keras code run in graph mode, into the callback. The methods differ slightly in callback initialization and (in one case) model compilation, and most importantly, in the quantities they can access, which is why I summarize them above each snippet.
Custom metric
Using a custom (fake) metric (similar to my Jan 2020 answer), while we cannot seem to access model.inputs nor model.outputs any more (and model.(_)targets does not even exist any longer), we can access y_true and y_pred, which represent the model targets and outputs:
[ ] Inputs/Samples (x)
[ ] Weights (w)
[+] Targets/Labels (y_true)
[+] Outputs/Predictions (y_pred)
[ ] All layers (or only final input/output layers)
"""Demonstrate batch data access using a custom metric."""
import tensorflow as tf
from tensorflow import keras
class DataCallback(keras.callbacks.Callback): # diff
"""Callback to operate on batch data from metric."""
def __init__(self):
"""Offer a metric to access batch data."""
super().__init__()
self.y_true = None
self.y_pred = None
def set_model(self, model):
"""Initialize variables when model is set."""
self.y_true = tf_nan(model.output.dtype)
self.y_pred = tf_nan(model.output.dtype)
def metric(self, y_true, y_pred):
"""Fake metric."""
self.y_true.assign(y_true)
self.y_pred.assign(y_pred)
return 0
def on_train_batch_end(self, _batch, _logs=None):
"""See keras.callbacks.Callback.on_train_batch_end."""
print("y_true =", self.y_true.numpy())
print("y_pred =", self.y_pred.numpy())
def on_train_end(self, _logs=None):
"""Clean up."""
del self.y_true, self.y_pred
def tf_nan(dtype):
"""Create NaN variable of proper dtype and variable shape for assign()."""
return tf.Variable(float("nan"), dtype=dtype, shape=tf.TensorShape(None))
def main():
"""Run main."""
model = keras.Sequential([keras.layers.Dense(1, input_shape=(2,))])
callback = DataCallback()
model.compile(loss="mse", optimizer="adam", metrics=[callback.metric]) # diff
model.fit(
x=tf.transpose(tf.range(7.0) + [[0.2], [0.4]]),
y=tf.transpose(tf.range(7.0) + 10 + [[0.5]]),
validation_data=(
tf.transpose(tf.range(11.0) + 30 + [[0.6], [0.7]]),
tf.transpose(tf.range(11.0) + 40 + [[0.9]]),
),
shuffle=False,
batch_size=3,
epochs=2,
verbose=0,
callbacks=[callback],
)
model.save("tmp.tf")
if __name__ == "__main__":
main()
Custom training step
A custom training step is what I used in an earlier version of this answer. The idea still works in principle, but y_pred can be expensive and it might make sense to use a custom metric (see above) if that is required.
[+] Inputs/Samples (x)
[+] Weights (w)
[+] Targets/Labels (y_true)
[~] Outputs/Predictions (y_pred) [expensive!]
[ ] All layers (or only final input/output layers)
"""Demonstrate batch data access using a custom training step."""
import tensorflow as tf
from tensorflow import keras
class DataCallback(keras.callbacks.Callback): # diff
"""Callback to operate on batch data from training step."""
def __init__(self):
"""Initialize tf.Variables."""
super().__init__()
self.x = None
self.w = None
self.y_true = None
self.y_pred = None
def set_model(self, model):
"""Wrap the model.train_step function to access training batch data."""
self.x = tf_nan(model.input.dtype)
# pylint:disable=protected-access (replace by proper dtype if you know it)
if model.compiled_loss._user_loss_weights is not None:
self.w = tf_nan(model.compiled_loss._user_loss_weights.dtype)
self.y_true = tf_nan(model.output.dtype)
self.y_pred = tf_nan(model.output.dtype)
model_train_step = model.train_step
def outer_train_step(data):
# https://github.com/keras-team/keras/blob/v2.7.0/keras/engine/training.py
x, y_true, w = keras.utils.unpack_x_y_sample_weight(data)
self.x.assign(x)
if w is not None:
self.w.assign(w)
self.y_true.assign(y_true)
result = model_train_step(data)
y_pred = model(x)
self.y_pred.assign(y_pred)
return result
model.train_step = outer_train_step
def on_train_batch_end(self, _batch, _logs=None):
"""See keras.callbacks.Callback.on_train_batch_end."""
print("x =", self.x.numpy())
if self.w is not None:
print("w =", self.w.numpy())
print("y_true =", self.y_true.numpy())
print("y_pred =", self.y_pred.numpy())
def on_train_end(self, _logs=None):
"""Clean up."""
del self.x, self.w, self.y_true, self.y_pred
def tf_nan(dtype):
"""Create NaN variable of proper dtype and variable shape for assign()."""
return tf.Variable(float("nan"), dtype=dtype, shape=tf.TensorShape(None))
def main():
"""Run main."""
model = keras.Sequential([keras.layers.Dense(1, input_shape=(2,))])
callback = DataCallback()
model.compile(loss="mse", optimizer="adam")
model.fit(
x=tf.transpose(tf.range(7.0) + [[0.2], [0.4]]),
y=tf.transpose(tf.range(7.0) + 10 + [[0.5]]),
validation_data=(
tf.transpose(tf.range(11.0) + 30 + [[0.6], [0.7]]),
tf.transpose(tf.range(11.0) + 40 + [[0.9]]),
),
shuffle=False,
batch_size=3,
epochs=2,
verbose=0,
callbacks=[callback],
)
model.save("tmp.tf")
if __name__ == "__main__":
main()
Custom layer call
A custom layer call is a super-flexible way of accessing each layer's inputs and outputs. The callback handles patching of the call functions for a list of layers. While we cannot access weights and targets (as these quantities do not make sense at the level of individual layers), it allows us to access individual layer activations, which can be handy for questions such as How does one log activations using `tf.keras.callbacks.TensorBoard`?
[+] Inputs/Samples (x)
[ ] Weights (w)
[ ] Targets/Labels (y_true)
[+] Outputs/Predictions (y_pred)
[+] All layers (or only final input/output layers)
"""Demonstrate batch data access using custom layer calls."""
import tensorflow as tf
from tensorflow import keras
class DataCallback(keras.callbacks.Callback): # diff
"""Callback to operate on batch data from selected (to be wrapped) layers."""
def __init__(self, layers):
"""Wrap the calls of an iterable of model layers to access layer batch data."""
super().__init__()
self.data = {}
self.inner_calls = {}
self.outer_calls = {}
for layer in layers:
self.data[layer] = {
"inputs": tf_nan(layer.input.dtype),
"outputs": tf_nan(layer.output.dtype),
}
self.inner_calls[layer] = layer.call
def outer_call(inputs, layer=layer, layer_call=layer.call):
self.data[layer]["inputs"].assign(inputs)
outputs = layer_call(inputs)
self.data[layer]["outputs"].assign(outputs)
return outputs
self.outer_calls[layer] = outer_call
def on_train_batch_begin(self, _epoch, _logs=None):
"""Wrap layer calls during each batch."""
for layer, call in self.outer_calls.items():
layer.call = call
def on_train_batch_end(self, _epoch, _logs=None):
"""Restore original layer calls for ModelCheckpoint, model.save, ..."""
for layer, call in self.inner_calls.items():
layer.call = call
for layer, data in self.data.items():
print("Layer =", layer)
print("Inputs =", data["inputs"].numpy())
print("Outputs =", data["outputs"].numpy())
def tf_nan(dtype):
"""Create NaN variable of proper dtype and variable shape for assign()."""
return tf.Variable(float("nan"), dtype=dtype, shape=tf.TensorShape(None))
def main():
"""Run main."""
model = keras.Sequential([keras.layers.Dense(1, input_shape=(2,))])
callback = DataCallback(model.layers) # diff
model.compile(loss="mse", optimizer="adam")
model.fit(
x=tf.transpose(tf.range(7.0) + [[0.2], [0.4]]),
y=tf.transpose(tf.range(7.0) + 10 + [[0.5]]),
validation_data=(
tf.transpose(tf.range(11.0) + 30 + [[0.6], [0.7]]),
tf.transpose(tf.range(11.0) + 40 + [[0.9]]),
),
shuffle=False,
batch_size=3,
epochs=2,
verbose=0,
callbacks=[callback],
)
model.save("tmp.tf")
if __name__ == "__main__":
main()
When to use which and open to-dos
I think the snippets above each solution nicely summarize what each approach is capable of. Generally,
a custom training step will be ideal to access the model input, such as batched dataset generators, effects of shuffling, etc;
a custom layer call is ideal to access the in-betweens of the model; and
a custom metric is ideal to access the outputs of the model.
I am fairly certain (but have not tried) that one can combine all approaches to be able to access all batch quantities simultaneously. I have not tested anything but training mode - each method can have further pros and cons relating to their usefulness in testing or prediction mode. Finally, I assume, but have not tested either, that there should be only minor differences between tf.keras and keras. Having tested this code on TF 2.8.rc1 and Keras 2.8.0, which has moved the tf.keras code back into the keras pip package, and not using any private APIs, I believe this assumption is justified.
It would be great if this approach could be extended to access model.inputs and model.outputs again. Currently, I am getting errors such as this one:
TypeError: You are passing KerasTensor(...), an intermediate Keras symbolic input/output, to a TF API that does not allow registering custom dispatchers, such as tf.cond, tf.function, gradient tapes, or tf.map_fn. Keras Functional model construction only supports TF API calls that do support dispatching, such as tf.math.add or tf.reshape. Other APIs cannot be called directly on symbolic Keras inputs/outputs. You can work around this limitation by putting the operation in a custom Keras layer call and calling that layer on this symbolic input/output.
Previous answer
From TF 2.2 on, you can use custom training steps rather than callbacks to achieve what you want. Here's a demo that works with tensorflow==2.2.0rc1, using inheritance to improve the keras.Sequential model. Performance-wise, this is not ideal as predictions are made twice, once in self(x, training=True) and once in super().train_step(data). But you get the idea.
This works in eager mode and does not use private APIs, so it should be pretty stable. One caveat is that you have to use tf.keras (standalone keras does not support Model.train_step), but I feel standalone keras is becoming more and more deprecated anyway. (In fact, tf.keras migrates to keras in TF2.8.)
"""Demonstrate access to Keras batch tensors in a tf.keras custom training step."""
import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.python.keras.engine import data_adapter
in_shape = (2,)
out_shape = (1,)
batch_size = 3
n_samples = 7
class SequentialWithPrint(keras.Sequential):
def train_step(self, original_data):
# Basically copied one-to-one from https://git.io/JvDTv
data = data_adapter.expand_1d(original_data)
x, y_true, w = data_adapter.unpack_x_y_sample_weight(data)
y_pred = self(x, training=True)
# this is pretty much like on_train_batch_begin
K.print_tensor(w, "Sample weight (w) =")
K.print_tensor(x, "Batch input (x) =")
K.print_tensor(y_true, "Batch output (y_true) =")
K.print_tensor(y_pred, "Prediction (y_pred) =")
result = super().train_step(original_data)
# add anything here for on_train_batch_end-like behavior
return result
# Model
model = SequentialWithPrint([keras.layers.Dense(out_shape[0], input_shape=in_shape)])
model.compile(loss="mse", optimizer="adam")
# Example data
X = np.random.rand(n_samples, *in_shape)
Y = np.random.rand(n_samples, *out_shape)
model.fit(X, Y, batch_size=batch_size)
print("X: ", X)
print("Y: ", Y)
Finally, here is a simpler example without inheritance:
"""Demonstrate access to Keras batch tensors in a tf.keras custom training step."""
import tensorflow as tf
IN_SHAPE = (2,)
OUT_SHAPE = (1,)
BATCH_SIZE = 3
N_SAMPLES = 7
def make_print_data_and_train_step(keras_model):
"""Return a train_step function that prints data batches."""
original_train_step = keras_model.train_step
def print_data_and_train_step(data):
# Adapted from https://git.io/JvDTv, skipping data_adapter.expand_1d
x, y_true, w = tf.keras.utils.unpack_x_y_sample_weight(data)
y_pred = keras_model(x, training=True)
# this is pretty much like on_train_batch_begin
tf.keras.backend.print_tensor(w, "Sample weight (w) =")
tf.keras.backend.print_tensor(x, "Batch input (x) =")
tf.keras.backend.print_tensor(y_true, "Batch output (y_true) =")
tf.keras.backend.print_tensor(y_pred, "Prediction (y_pred) =")
result = original_train_step(data)
# add anything here for on_train_batch_end-like behavior
return result
return print_data_and_train_step
# Model
model = tf.keras.Sequential([tf.keras.layers.Dense(OUT_SHAPE[0], input_shape=IN_SHAPE)])
model.train_step = make_print_data_and_train_step(model)
model.compile(loss="mse", optimizer="adam")
# Example data
X = tf.random.normal((N_SAMPLES, *IN_SHAPE))
Y = tf.random.normal((N_SAMPLES, *OUT_SHAPE))
model.fit(X, Y, batch_size=BATCH_SIZE)
print("X: ", X)
print("Y: ", Y)
Update: This approach has stopped working. See my other answer for a number of solutions compatible with TF 2.8 (and hopefully beyond).
One problem with @Yu-Yang's solution is that it relies on model._function_kwargs, which is not guaranteed to work as it is not part of the API. In particular, in TF2 with eager execution, session kwargs seem to be either not accepted at all or run preemptively due to eager mode.
Therefore, here is my solution tested on tensorflow==2.1.0. The trick is to replace fetches by a Keras metric, in which the assignment operations from fetches are made during training.
This even enables a Keras-only solution if the batch size divides the number of samples; otherwise, another trick has to be applied when initializing TensorFlow variables with a None shape, similar to validate_shape=False in earlier solutions (compare https://github.com/tensorflow/tensorflow/issues/35667).
Importantly, tf.keras behaves differently from keras (sometimes just ignoring assignments, or seeing variables as Keras symbolic tensors), so this updated solution takes care of both implementations (Keras==2.3.1 and tensorflow==2.1.0).
"""Demonstrate access to Keras symbolic tensors in a (tf.)keras.Callback."""
import numpy as np
import tensorflow as tf
use_tf_keras = True
if use_tf_keras:
from tensorflow import keras
from tensorflow.keras import backend as K
tf.config.experimental_run_functions_eagerly(False)
compile_kwargs = {"run_eagerly": False, "experimental_run_tf_function": False}
else:
import keras
from keras import backend as K
compile_kwargs = {}
in_shape = (2,)
out_shape = (1,)
batch_size = 3
n_samples = 7
class CollectKerasSymbolicTensorsCallback(keras.callbacks.Callback):
"""Collect Keras symbolic tensors."""
def __init__(self):
"""Initialize intermediate variables for batches and lists."""
super().__init__()
# Collect batches here
self.inputs = []
self.targets = []
self.outputs = []
# # For a pure Keras solution, we need to know the shapes beforehand;
# # in particular, batch_size must divide n_samples:
# self.input = K.variable(np.empty((batch_size, *in_shape)))
# self.target = K.variable(np.empty((batch_size, *out_shape)))
# self.output = K.variable(np.empty((batch_size, *out_shape)))
# If the shape of these variables will change (e.g., last batch), initialize
# arbitrarily and specify `shape=tf.TensorShape(None)`:
self.input = tf.Variable(0.0, shape=tf.TensorShape(None))
self.target = tf.Variable(0.0, shape=tf.TensorShape(None))
self.output = tf.Variable(0.0, shape=tf.TensorShape(None))
def on_batch_end(self, batch, logs=None):
"""Evaluate the variables and save them into lists."""
self.inputs.append(K.eval(self.input))
self.targets.append(K.eval(self.target))
self.outputs.append(K.eval(self.output))
def on_train_end(self, logs=None):
"""Print all variables."""
print("Inputs: ", *self.inputs)
print("Targets: ", *self.targets)
print("Outputs: ", *self.outputs)
#tf.function
def assign_keras_symbolic_tensors_metric(_foo, _bar):
"""
Return the assignment operations as a metric to have them evaluated by Keras.
This replaces `fetches` from the TF1/non-eager-execution solution.
"""
# Collect assignments as list of (dest, src)
assignments = (
(callback.input, model.inputs[0]),
(callback.target, model._targets[0] if use_tf_keras else model.targets[0]),
(callback.output, model.outputs[0]),
)
for (dest, src) in assignments:
dest.assign(src)
return 0
callback = CollectKerasSymbolicTensorsCallback()
metrics = [assign_keras_symbolic_tensors_metric]
# Example model
model = keras.Sequential([keras.layers.Dense(out_shape[0], input_shape=in_shape)])
model.compile(loss="mse", optimizer="adam", metrics=metrics, **compile_kwargs)
# Example data
X = np.random.rand(n_samples, *in_shape)
Y = np.random.rand(n_samples, *out_shape)
model.fit(X, Y, batch_size=batch_size, callbacks=[callback])
print("X: ", X)
print("Y: ", Y)
Inspired by the way tf.keras.callbacks.TensorBoard saves v1 (graph) summaries.
No variable assignments and no redundant metrics.
For use with tensorflow>=2.0.0, in graph mode (eager execution disabled) during evaluation.
Extensive operations on the numpy predictions can be implemented by overriding SavePrediction._pred_callback.
import numpy as np
import tensorflow as tf
from tensorflow import keras

tf.compat.v1.disable_eager_execution()

in_shape = (2,)
out_shape = (1,)
batch_size = 2
n_samples = 32

class SavePrediction(keras.callbacks.Callback):
    def __init__(self):
        super().__init__()
        self._get_pred = None
        self.preds = []

    def _pred_callback(self, preds):
        self.preds.append(preds)

    def set_model(self, model):
        super().set_model(model)
        if self._get_pred is None:
            self._get_pred = self.model.outputs[0]

    def on_test_begin(self, logs):
        # pylint: disable=protected-access
        self.model._make_test_function()
        # pylint: enable=protected-access
        if self._get_pred not in self.model.test_function.fetches:
            self.model.test_function.fetches.append(self._get_pred)
        self.model.test_function.fetch_callbacks[self._get_pred] = self._pred_callback

    def on_test_end(self, logs):
        if self._get_pred in self.model.test_function.fetches:
            self.model.test_function.fetches.remove(self._get_pred)
        if self._get_pred in self.model.test_function.fetch_callbacks:
            self.model.test_function.fetch_callbacks.pop(self._get_pred)
        print(self.preds)

model = keras.Sequential([
    keras.layers.Dense(out_shape[0], input_shape=in_shape)
])
model.compile(loss="mse", optimizer="adam")

X = np.random.rand(n_samples, *in_shape)
Y = np.random.rand(n_samples, *out_shape)
model.evaluate(X, Y,
               batch_size=batch_size,
               callbacks=[SavePrediction()])
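For example (my own sketch, not part of the original answer), a subclass that overrides _pred_callback could accumulate per-batch statistics instead of the raw arrays:

class SavePredictionStats(SavePrediction):
    """Hypothetical subclass: keep only the per-batch mean prediction."""

    def _pred_callback(self, preds):
        # preds is the numpy array of predictions for the current batch
        self.preds.append(preds.mean(axis=0))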