Attention layer output shape issue - tensorflow

I have been using BiLSTMs to classify each word in sentences, and my target data has shape (n_sentences, max_sequence_length, classes). Recently I have been trying to use this attention layer: https://www.kaggle.com/takuok/bidirectional-lstm-and-attention-lb-0-043
# Imports assumed by this layer (standard Keras):
from keras import backend as K
from keras import initializers, regularizers, constraints
from keras.layers import Layer

class Attention(Layer):
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        # The mask is consumed here and not passed on.
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # Attention scores, one per timestep: (batch, steps)
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                              K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        # Weighted sum over timesteps collapses the step axis: (batch, features)
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
My output needs to be (samples, steps, features), or I get this error:
ValueError: Error when checking target: expected dense_2 to have 2 dimensions, but got array with shape (656, 109, 2)
So I switched:
return input_shape[0], self.features_dim
to
return input_shape[0], self.step_dim, self.features_dim
Doing so I get another error:
InvalidArgumentError: Incompatible shapes: [32,109] vs. [32]
[[{{node metrics/acc/Equal}}]]
What do I need to modify to actually use the attention layer on my sentences?

Are you using SeqSelfAttention?
I faced the same issue; using SeqWeightedAttention instead of SeqSelfAttention solved my problem.
model.add(SeqWeightedAttention())
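For context, a minimal sketch of how SeqWeightedAttention (from the keras-self-attention package) might slot into a model; vocab_size and n_classes are hypothetical placeholders, and depending on the package version you may need os.environ['TF_KERAS'] = '1' to use it with tf.keras. Note that it pools (samples, steps, features) down to (samples, features), so the final Dense predicts one label per sequence:

from tensorflow import keras
from keras_self_attention import SeqWeightedAttention  # pip install keras-self-attention

model = keras.Sequential([
    keras.layers.Embedding(vocab_size, 128),  # vocab_size: placeholder
    keras.layers.Bidirectional(keras.layers.LSTM(64, return_sequences=True)),
    SeqWeightedAttention(),                   # (batch, steps, 128) -> (batch, 128)
    keras.layers.Dense(n_classes, activation='softmax'),  # n_classes: placeholder
])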

Related

Unhashable Type TypeError: Tensors are unhashable. Instead, use tensor.ref() as the key

I am trying to implement a custom variational autoencoder. Following is the code to reproduce it.
epsilon_std = 1.0
vx = tf.keras.layers.Input(batch_shape=(None, max_length_output), name='vae_enc_in')
vx_emb = tf.keras.layers.Embedding(
    vocab_tar_size,
    embedding_dim,
    input_length=max_length_output,
    name='vae_enc_emb'
)(vx)
vxbi = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(units, return_sequences=False, recurrent_dropout=0.2, name='vae_enc_lstm'),
    merge_mode='concat'
)(vx_emb)
vx_drop = tf.keras.layers.Dropout(0.2, name='vae_enc_drop')(vxbi)
vx_dense = tf.keras.layers.Dense(units, activation='linear', name='vae_enc_dense')(vx_drop)
vx_elu = tf.keras.layers.ELU(name='vae_enc_elu')(vx_dense)
vx_drop1 = tf.keras.layers.Dropout(0.2, name='vae_enc_drop2')(vx_elu)
z_mean = tf.keras.layers.Dense(20, name='vae_enc_dense2')(vx_drop1)
z_log_var = tf.keras.layers.Dense(20, name='vae_enc_dense3')(vx_drop1)

def sampling(args):
    z_mean, z_log_var = args
    epsilon = tf.random.normal(shape=(BATCH_SIZE, 20), mean=0.,
                               stddev=epsilon_std)
    return z_mean + tf.math.exp(z_log_var / 2) * epsilon

z = tf.keras.layers.Lambda(sampling, output_shape=(20,), name='vae_lambda')([z_mean, z_log_var])
repeated_context = tf.keras.layers.RepeatVector(max_length_output, name='vae_repeat')
decoder_h = tf.keras.layers.LSTM(units, return_sequences=True, recurrent_dropout=0.2, name='vae_dec_lstm')
decoder_mean = tf.keras.layers.TimeDistributed(
    tf.keras.layers.Dense(vocab_tar_size, activation='linear', name='vae_dec_dense'),
    name='vae_dec_time_dist'
)
h_decoded = decoder_h(repeated_context(z))
x_decoded_mean = decoder_mean(h_decoded)

def zero_loss(y_true, y_pred):
    print("ZERO LOSS")
    return tf.zeros_like(y_pred)
And then I create a custom VAE layer:
class VAELayer(tf.keras.layers.Layer):
    def __init__(self, batch_size, max_len, **kwargs):
        self.is_placeholder = True
        super(VAELayer, self).__init__(**kwargs)
        self.target_weights = tf.constant(np.ones((batch_size, max_len)), tf.float32)

    def vae_loss(self, x, x_decoded_mean):
        #xent_loss = K.sum(metrics.categorical_crossentropy(x, x_decoded_mean), axis=-1)
        labels = tf.cast(x, tf.int32)
        xent_loss = tf.math.reduce_sum(
            tfa.seq2seq.sequence_loss(
                x_decoded_mean,
                labels,
                weights=self.target_weights,
                average_across_timesteps=False,
                average_across_batch=False
            ),
            axis=-1
        )
        #softmax_loss_function=softmax_loss_f), axis=-1)#, for sampled softmax
        kl_loss = -0.5 * tf.math.reduce_sum(1 + z_log_var - tf.math.square(z_mean) - tf.math.exp(z_log_var), axis=-1)
        return tf.math.reduce_mean(xent_loss + kl_loss)

    def call(self, inputs):
        x = inputs[0]
        x_decoded_mean = inputs[1]
        print(x.shape, x_decoded_mean.shape)
        loss = self.vae_loss(x, x_decoded_mean)
        print("Adding loss")
        self.add_loss(loss, inputs=inputs)
        print("Returning ones like")
        return tf.ones_like(x)
I compiled it successfully and also produced a test output by calling the model, but when I try to train it, it produces the error:
TypeError: Tensors are unhashable. (KerasTensor(type_spec=TensorSpec(shape=(), dtype=tf.float32, name=None), name='tf.math.reduce_sum_25/Sum:0', description="created by layer 'tf.math.reduce_sum_25'"))Instead, use tensor.ref() as the key.
Following is the code for compiling and fitting the model:
loss_layer = VAELayer(BATCH_SIZE, max_length_output)([vx, x_decoded_mean])
vae = tf.keras.Model(vx, [loss_layer], name='VariationalAutoEncoderLayer')
opt = tf.keras.optimizers.Adam(lr=0.01)  #SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
vae.compile(optimizer=opt, loss=[zero_loss])

def vae_sentence_generator():
    for ip, tg in train_dataset:
        yield tg.numpy()

vae.fit(vae_sentence_generator(), steps_per_epoch=steps_per_epoch, epochs=10)
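One plausible cause (an assumption, not stated in the original): vae_loss closes over z_mean and z_log_var, which are symbolic tensors from the surrounding functional graph, and referencing captured graph tensors inside a layer at training time is a common trigger for this unhashable-KerasTensor error in TF 2.x. A hedged sketch of the usual workaround, passing them into the layer as explicit inputs:

class VAELayer(tf.keras.layers.Layer):
    def __init__(self, batch_size, max_len, **kwargs):
        super(VAELayer, self).__init__(**kwargs)
        self.target_weights = tf.constant(np.ones((batch_size, max_len)), tf.float32)

    def call(self, inputs):
        # z_mean and z_log_var are now layer inputs, not captured graph tensors.
        x, x_decoded_mean, z_mean, z_log_var = inputs
        labels = tf.cast(x, tf.int32)
        xent_loss = tf.math.reduce_sum(
            tfa.seq2seq.sequence_loss(x_decoded_mean, labels,
                                      weights=self.target_weights,
                                      average_across_timesteps=False,
                                      average_across_batch=False),
            axis=-1)
        kl_loss = -0.5 * tf.math.reduce_sum(
            1 + z_log_var - tf.math.square(z_mean) - tf.math.exp(z_log_var), axis=-1)
        self.add_loss(tf.math.reduce_mean(xent_loss + kl_loss))
        return tf.ones_like(x)

loss_layer = VAELayer(BATCH_SIZE, max_length_output)(
    [vx, x_decoded_mean, z_mean, z_log_var])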

Is there a tensorflow keras layer that is a wrapper for a stack of Dense layers?

For example, this is trivial, but is there a layer for it? It is not really a convolution ... there is one "Dense layer" (one weight matrix) per data point.
In [266]: X = np.random.randn(10, 3); W = np.random.randn(10, 3, 4); (X[:, :, None] * W).sum(axis=1).shape
Out[266]: (10, 4)
Create your own layer:
Warning: this works only with a fixed batch size; you need to define batch_shape or batch_input_shape in your models!
class SampleDense(Layer):
    def __init__(self, units, **kwargs):
        self.units = units
        super(SampleDense, self).__init__(**kwargs)

    def build(self, input_shape):
        # The kernel includes the batch dimension: one weight matrix per sample.
        weight_shape = input_shape + (self.units,)
        self.kernel = self.add_weight(name='kernel',
                                      shape=weight_shape,
                                      initializer='uniform',
                                      trainable=True)
        self.built = True

    def call(self, inputs):
        inputs = K.expand_dims(inputs, axis=-1)
        outputs = inputs * self.kernel
        outputs = K.sum(outputs, axis=-2)
        return outputs

    def compute_output_shape(self, input_shape):
        return input_shape[:-1] + (self.units,)
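A hypothetical usage sketch matching the numpy example above (standalone Keras imports assumed; the batch size of 10 is baked into the kernel, hence the batch_shape requirement):

from keras.layers import Input
from keras.models import Model

inp = Input(batch_shape=(10, 3))   # fixed batch size, as the warning requires
out = SampleDense(4)(inp)          # per-sample kernels: shape (10, 3, 4) = 120 weights
model = Model(inp, out)
model.summary()                    # output shape: (10, 4)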

Tensorflow 2.0 How to handle tf.Variable as Weight in __call__ function?

I made a 'Decoder' layer to build a product item matrix in TensorFlow.
I initialize W and bias, then use them in a calculation to get a loss value.
class Decoder(tf.keras.layers.Layer):
    def __init__(self, units=len(counter), input_dim=300):
        super(Decoder, self).__init__()
        self.trainer = Trainer(vec_dict)
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(initial_value=w_init(shape=(units, input_dim), dtype='float32'), trainable=True)
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(initial_value=b_init(shape=(units,), dtype='float32'), trainable=True)

    def call(self, x_play, y_pos, y_neg):
        x = self.trainer(x_play)
        user_vector = x[..., tf.newaxis, :]
        wp = self.w.numpy()[y_pos]
        bp = self.b.numpy()[y_pos]
        pos_mul = tf.multiply(user_vector, wp)
        pos = tf.reduce_sum(pos_mul, axis=-1) + bp
        wn = self.w.numpy()[y_neg]
        bn = self.b.numpy()[y_neg]
        neg_mul = tf.multiply(user_vector, wn)
        neg = tf.reduce_sum(neg_mul, axis=-1) + bn
        tot = tf.sigmoid(tf.concat([pos, neg], axis=1))
        return tot
As you can see, using '.numpy()' makes it possible to get a matrix with the shape I want.
For instance, self.w has shape (10000, 300) and y_pos has shape (256, 100),
so self.w.numpy()[y_pos] has shape (256, 100, 300).
The problem is that when I run this code for training, I get this error:
NotImplementedError: in converted code:

    <ipython-input-835-8d5f0b593979>:4 train_step  *
        predictions = model(x, y_pos, y_neg)
    /usr/local/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py:847 __call__
        outputs = call_fn(cast_inputs, *args, **kwargs)
    <ipython-input-838-b9b896ea4fff>:64 call
        wp = self.w.numpy()[y_pos]
    /usr/local/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:579 numpy
        "numpy() is only available when eager execution is enabled.")

    NotImplementedError: numpy() is only available when eager execution is enabled.
Can somebody let me know what's going on?
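One graph-safe alternative (a suggestion, not from the original post): tf.gather does the same fancy indexing directly on the variable, so it works when call is traced into a graph, where .numpy() is unavailable:

# Inside Decoder.call; tf.gather(self.w, y_pos) matches self.w.numpy()[y_pos] shape-wise.
wp = tf.gather(self.w, y_pos)   # (256, 100, 300)
bp = tf.gather(self.b, y_pos)   # (256, 100)
wn = tf.gather(self.w, y_neg)
bn = tf.gather(self.b, y_neg)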

Define custom LSTM with multiple inputs

Following the tutorial on writing custom layers, I am trying to implement a custom LSTM layer with multiple input tensors. I am providing two vectors input_1 and input_2 as a list [input_1, input_2], as suggested in the tutorial. The single-input code works, but when I change the code for multiple inputs, it throws the error:
self.kernel = self.add_weight(shape=(input_shape[0][-1], self.units),
TypeError: 'NoneType' object is not subscriptable.
What change do I have to make to get rid of the error? Here is the modified code.
class MinimalRNNCell(keras.layers.Layer):
    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(MinimalRNNCell, self).__init__(**kwargs)

    def build(self, input_shape):
        print(type(input_shape))
        self.kernel = self.add_weight(shape=(input_shape[0][-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

    def call(self, inputs, states):
        prev_output = states[0]
        h = K.dot(inputs[0], self.kernel)
        output = h + K.dot(prev_output, self.recurrent_kernel)
        return output, [output]

# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
input_1 = keras.Input((None, 5))
input_2 = keras.Input((None, 5))
layer = RNN(cell)
y = layer([input_1, input_2])
The error is caused by the line y = layer([input_1, input_2]).
Replacing it with y = layer((input_1, input_2)) (passing a tuple of inputs rather than a list) resolves the error: an RNN layer interprets a list argument as [inputs, initial_state, constants], whereas a tuple is treated as a single nested input for the cell.
Complete working code using tf.keras is shown below:
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import RNN
import tensorflow as tf

class MinimalRNNCell(tf.keras.layers.Layer):
    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        #self.state_size = [tf.TensorShape([units])]
        super(MinimalRNNCell, self).__init__(**kwargs)

    def build(self, input_shape):
        print(type(input_shape))
        self.kernel = self.add_weight(shape=(input_shape[0][-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

    def call(self, inputs, states):
        prev_output = states[0]
        h = K.dot(inputs[0], self.kernel)
        output = h + K.dot(prev_output, self.recurrent_kernel)
        return output, [output]

# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
input_1 = tf.keras.Input((None, 5))
input_2 = tf.keras.Input((None, 5))
layer = RNN(cell)
y = layer((input_1, input_2))
Output of the above code is:
<class 'tuple'>
Hope this helps. Happy Learning!

Keras unable to calculate number of parameters in a Keras Custom Layer

I am building a Keras custom layer with some TensorFlow support. Before that, I wanted to test whether a Convolution2D layer works properly if I write a Keras layer with TensorFlow's conv2d in the call function.
class Convolutional2D(Layer):
    def __init__(self, filters=None, kernel_size=None, padding='same', activation='linear', strides=(1,1), name='Conv2D', **kwargs):
        self.filters = filters
        self.kernel_size = kernel_size
        self.padding = padding
        self.activation = activation
        self.strides = strides
        self.name = name
        self.input_spec = [InputSpec(ndim=4)]
        super(Convolutional2D, self).__init__(**kwargs)

    def call(self, input):
        out = tf.layers.conv2d(inputs=input, filters=self.filters, kernel_size=self.kernel_size,
                               strides=self.strides, padding=self.padding,
                               data_format='channels_last')
        return out

    def compute_output_shape(self, input_shape):
        batch_size = input_shape[0]
        width = input_shape[1] / self.strides[0]
        height = input_shape[2] / self.strides[1]
        channels = self.filters
        return (batch_size, width, height, channels)

    def get_config(self):
        config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'padding': self.padding,
                  'activation': self.activation, 'strides': self.strides, 'name': self.name}
        base_config = super(Convolutional2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
This compiles properly, but when I use model.summary() it does not calculate the number of parameters for this layer.
What do I have to do so that the model's total parameter count includes the trainable parameters of this layer?
I have found the answer to this problem.
def build(self, input_shape):
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    self.kernel = self.add_weight(shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=self.rank + 2,
                                axes={channel_axis: input_dim})
    self.built = True
The add_weight calls define the number of parameters, which I had not done in my code. That does not hamper the performance of the model; it works fine, except that the parameter count for this layer cannot be reported.
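One caveat worth noting (an assumption, not part of the original answer): the weights created in build are only counted meaningfully if call actually uses them, and tf.layers.conv2d creates its own separate variables. A minimal sketch of a call that convolves with the layer's own kernel via the Keras backend:

def call(self, inputs):
    # Convolve with the kernel created in build, not a fresh tf.layers variable.
    outputs = K.conv2d(inputs,
                       self.kernel,
                       strides=self.strides,
                       padding=self.padding,
                       data_format='channels_last')
    if self.use_bias:
        outputs = K.bias_add(outputs, self.bias, data_format='channels_last')
    return outputs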