I am trying to train a RaLSGAN in Keras; the loss function is described at
https://www.kaggle.com/c/generative-dog-images/discussion/99485#latest-597003
# Take AVG over x_r and x_f in batch
disc_loss = (C(x_r) - AVG(C(x_f)) - 1)^2 + (C(x_f) - AVG(C(x_r)) + 1)^2
gen_loss = (C(x_r) - AVG(C(x_f)) + 1)^2 + (C(x_f) - AVG(C(x_r)) - 1)^2
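A toy NumPy check of these two formulas (my own sketch, with made-up critic scores), to make the batch averaging explicit:
import numpy as np
C_xr = np.array([0.9, 0.8, 0.7])  # critic scores on real images
C_xf = np.array([0.1, 0.2, 0.3])  # critic scores on fakes
disc_loss = np.mean((C_xr - C_xf.mean() - 1)**2) + np.mean((C_xf - C_xr.mean() + 1)**2)
gen_loss = np.mean((C_xr - C_xf.mean() + 1)**2) + np.mean((C_xf - C_xr.mean() - 1)**2)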
To get C(x_r) and C(x_f), I feed both sets of images into the network and concatenate them before the output. However, I get a dimension-incompatibility error:
# ========= loss functions =========
import tensorflow as tf

batch = 75

def TF_errD(label_one, predict):
    fake, real = tf.split(predict, [batch, batch], 0)
    label_one, label_one1 = tf.split(label_one, [batch, batch], 0)
    return (tf.reduce_mean((real - tf.reduce_mean(fake, 0) - label_one)**2, 0)
            + tf.reduce_mean((fake - tf.reduce_mean(real, 0) + label_one)**2, 0)) / 2.

# label_one here is a dummy; I tried tf.ones_like in the return instead and got the same error
def TF_errG(label_one, predict):
    fake, real = tf.split(predict, [batch, batch], 0)
    return (tf.reduce_mean((real - tf.reduce_mean(fake, 0) + tf.ones_like(real))**2, 0)
            + tf.reduce_mean((fake - tf.reduce_mean(real, 0) - tf.ones_like(real))**2, 0)) / 2.
# ========= end of loss functions =========
# ======== here is the generator phase ==============
from keras.layers import (Input, Dense, Reshape, BatchNormalization,
                          Flatten, Conv2D, UpSampling2D, Conv2DTranspose)
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam

input_dims = (1000,)
basic_units = 10

def conv_l(x, channel=10, kernel=4, stride=(2, 2), padding='same', batch_n_momentum=0.5, relu_alpha=0.2):
    x = Conv2DTranspose(filters=channel, kernel_size=kernel, strides=stride, padding=padding)(x)
    x = BatchNormalization(momentum=batch_n_momentum)(x)
    x = LeakyReLU(alpha=relu_alpha)(x)
    return x

Input_l = Input(shape=input_dims)
x = Dense(512*8*8)(Input_l)  # input_dim is unnecessary with the functional Input above
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.5)(x)
x = Reshape((8, 8, 512))(x)
x = conv_l(x, 512)   # 16x16
x = conv_l(x, 256)   # 32x32
x = conv_l(x, 128)   # 64x64
Output_l = Conv2D(3, 3, padding='same', activation='tanh')(x)

Generator = Model(Input_l, Output_l)
Generator.summary()
# ======== end of generator ==============
# ======== here is the discriminator phase ==============
from keras.layers import Dropout

Discriminator = Sequential()
Discriminator.add(Conv2D(32, 4, padding='same', strides=2, input_shape=(64, 64, 3)))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Conv2D(64, 4, strides=2, padding='same'))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Conv2D(128, 4, strides=2, padding='same'))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Conv2D(256, 4, strides=2, padding='same'))
Discriminator.add(LeakyReLU(alpha=0.2))
Discriminator.add(Dropout(0.3))
Discriminator.add(Conv2D(1, 4, strides=1, padding='valid'))  # output shape: (batch, 1, 1, 1)

optimizer_dis = Adam(0.0002, 0.5)  # assumed; the optimizer definition is not shown in the question
Discriminator.compile(loss=TF_errD, optimizer=optimizer_dis, metrics=['accuracy'])
Discriminator.summary()
# =========== end of discriminator ==============
# ========= GAN network =========
from keras.layers.merge import concatenate

Discriminator.trainable = False
image_shape = imagesIn[0].shape  # (64, 64, 3); imagesIn holds the training images (loaded elsewhere)
input_noise = Input(input_dims)
input_real = Input(image_shape)
output_g = Generator(input_noise)
dis_inputimage = concatenate([output_g, input_real], axis=0)  # fake first, real second, matching the tf.split order
output = Discriminator(dis_inputimage)

GAN = Model([input_noise, input_real], output)
optimizer_gen = Adam(0.0002, 0.5)  # assumed; not shown in the question
GAN.compile(loss=TF_errG, optimizer=optimizer_gen, metrics=['accuracy'])
GAN.summary()
# =============== end of GAN ============
# ======== training phase ============
import numpy as np

epochs = 600
input_dim = 1000
label_one_d = tf.ones([150, 1, 1, 1], tf.int32)
label_one_g = tf.ones([75, 1, 1, 1], tf.int32)
for times in range(epochs):
    for batch_times in range(int(idxIn / batch)):  # idxIn: number of training images (defined elsewhere)
        # =======================
        # ==train discriminator==
        # =======================
        sub_images = imagesIn2[batch_times*batch:(batch_times+1)*batch]
        noise = np.random.normal(0, 1, (batch, input_dim))
        synthesis_img = Generator.predict(noise)
        data = np.concatenate((synthesis_img, sub_images))
        d_loss = Discriminator.train_on_batch(data, label_one_d)
        # =======================
        # ====train generator====
        # =======================
        noise = np.random.normal(0, 1, (batch, input_dim))
        g_loss = GAN.train_on_batch([noise, sub_images], label_one_g)
# ====================== end of training ===============
If I set label_one_g = tf.ones([75,1,1,1], tf.int32), then I get an error like:
InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: Incompatible shapes: [150,1,1,1] vs. [75,1,1,1]
[[{{node metrics_11/acc/Equal}}]]
[[loss_11/mul/_1877]]
(1) Invalid argument: Incompatible shapes: [150,1,1,1] vs. [75,1,1,1]
[[{{node metrics_11/acc/Equal}}]]
0 successful operations.
0 derived errors ignored.
If I set label_one_g = tf.ones([150,1,1,1], tf.int32), then:
ValueError: Input arrays should have the same number of samples as target arrays. Found 75 input samples and 150 target samples.
I need help addressing this issue. Or is there any other training method I can use?
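One workaround sketch (mine, untested against the rest of this code): Keras requires the target array to have the same number of samples as the input arrays, and here the inputs carry 75 samples each while the concatenated discriminator output carries 150. Calling the discriminator twice and merging the two score tensors along the channel axis keeps the output batch at 75, so a 75-sample label array works:
out_fake = Discriminator(output_g)                   # (75, 1, 1, 1)
out_real = Discriminator(input_real)                 # (75, 1, 1, 1)
merged = concatenate([out_fake, out_real], axis=-1)  # (75, 1, 1, 2)
GAN2 = Model([input_noise, input_real], merged)

def TF_errG2(label_one, predict):
    fake = predict[..., 0]  # recover the two critic scores from the channel axis
    real = predict[..., 1]
    return (tf.reduce_mean((real - tf.reduce_mean(fake, 0) + 1.)**2)
            + tf.reduce_mean((fake - tf.reduce_mean(real, 0) - 1.)**2)) / 2.

GAN2.compile(loss=TF_errG2, optimizer=optimizer_gen)
# train with: GAN2.train_on_batch([noise, sub_images], np.ones((batch, 1, 1, 2)))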
I have a model based on MobileNetV2 with 2 outputs: class (cat/dog) and face coordinates. The "class" output has a BinaryCrossentropy loss and the "bbox" output has a YOLO-style loss (in the code).
The problem is, when I try to apply metrics (accuracy for class and MeanIoU for bbox):
On a random dataset: they show strange results (accuracy == 0, miou == 1 all the time).
On the real dataset (images and labels): fit() throws an error:
TypeError: '>' not supported between instances of 'NoneType' and 'int'
I suspect that I messed up the outputs and metrics somehow; it would be nice if someone with more experience in TensorFlow took a look at it.
Here is the code (with a random dataset to reproduce):
import tensorflow as tf
# generate fake dataset
IMG_SIZE = 200
num_of_samples = 2000
images = tf.random.uniform((num_of_samples, IMG_SIZE, IMG_SIZE, 3), minval=0, maxval=1)
images = tf.data.Dataset.from_tensor_slices(images)
label_classes = tf.random.uniform((num_of_samples, 1), minval=0, maxval=2, dtype=tf.int32)
label_classes = tf.data.Dataset.from_tensor_slices(label_classes)
label_coords = tf.random.uniform((num_of_samples, 4), minval=0, maxval=1, dtype=tf.float16)
label_coords = tf.data.Dataset.from_tensor_slices(label_coords)
labels = tf.data.Dataset.zip((label_classes, label_coords))
train = tf.data.Dataset.zip((images, labels))
train = train.shuffle(num_of_samples)
train = train.batch(8)
train = train.prefetch(4)
# same construction for the validation set
num_of_samples = 500
images = tf.random.uniform((num_of_samples, IMG_SIZE, IMG_SIZE, 3), minval=0, maxval=1)
images = tf.data.Dataset.from_tensor_slices(images)
label_classes = tf.random.uniform((num_of_samples, 1), minval=0, maxval=2, dtype=tf.int32)
label_classes = tf.data.Dataset.from_tensor_slices(label_classes)
label_coords = tf.random.uniform((num_of_samples, 4), minval=0, maxval=1, dtype=tf.float16)
label_coords = tf.data.Dataset.from_tensor_slices(label_coords)
labels = tf.data.Dataset.zip((label_classes, label_coords))
valid = tf.data.Dataset.zip((images, labels))
valid = valid.shuffle(num_of_samples)
valid = valid.batch(8)
valid = valid.prefetch(4)
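A quick sanity check of the element structure the two heads will be trained against (my own snippet; shapes follow from batch(8)): each batch is (images, (class_labels, bbox_labels)).
imgs, (cls, bbox) = next(iter(train))
print(imgs.shape, cls.shape, bbox.shape)  # (8, 200, 200, 3) (8, 1) (8, 4)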
# Model with two outputs
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, GlobalMaxPooling2D
from tensorflow.keras.applications import MobileNetV2

def cnn_from_transfer():
    IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
    input_layer = Input(shape=IMG_SHAPE)
    base_net = MobileNetV2(include_top=False, weights='imagenet')(input_layer)
    # Classification head
    h1 = GlobalMaxPooling2D()(base_net)
    class1 = Dense(2048, activation='relu')(h1)
    class2 = Dense(1, activation='sigmoid')(class1)
    # Bounding-box head
    h2 = GlobalMaxPooling2D()(base_net)
    regress1 = Dense(2048, activation='relu')(h2)
    regress2 = Dense(4, activation='sigmoid')(regress1)
    return Model(inputs=input_layer, outputs=[class2, regress2])

model = cnn_from_transfer()
# Losses
def localization_loss(y_true, yhat):
    # squared error of the top-left corner plus squared error of width/height
    delta_coord = tf.reduce_sum(tf.square(y_true[:, :2] - yhat[:, :2]))
    h_true = y_true[:, 3] - y_true[:, 1]
    w_true = y_true[:, 2] - y_true[:, 0]
    h_pred = yhat[:, 3] - yhat[:, 1]
    w_pred = yhat[:, 2] - yhat[:, 0]
    delta_size = tf.reduce_sum(tf.square(w_true - w_pred) + tf.square(h_true - h_pred))
    return delta_coord + delta_size

classloss = tf.keras.losses.BinaryCrossentropy()
regressloss = localization_loss

# Train
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001, decay=0.00001),
    loss=[classloss, regressloss],
    metrics=[[tf.keras.metrics.Accuracy()], [tf.keras.metrics.MeanIoU(num_classes=2)]],
)
history = model.fit(train, epochs=5, validation_data=valid)
What am I doing wrong?
As M.Innat said in the comments, tf.keras.metrics.MeanIoU is not applicable to my case (bboxes), so I need to make my own custom metric (the iou_metric function below). Moreover, I found out that BinaryAccuracy should be used instead of plain Accuracy. The rest is fine.
Here is the corrected code:
def iou_metric(y_true, y_pred):
    # areas of ground-truth and predicted boxes (rows are [x0, y0, x1, y1])
    aog = tf.abs(tf.transpose(y_true)[2] - tf.transpose(y_true)[0] + 1) * tf.abs(tf.transpose(y_true)[3] - tf.transpose(y_true)[1] + 1)
    aop = tf.abs(tf.transpose(y_pred)[2] - tf.transpose(y_pred)[0] + 1) * tf.abs(tf.transpose(y_pred)[3] - tf.transpose(y_pred)[1] + 1)
    # corners of the intersection rectangle
    overlap_0 = tf.maximum(tf.transpose(y_true)[0], tf.transpose(y_pred)[0])
    overlap_1 = tf.maximum(tf.transpose(y_true)[1], tf.transpose(y_pred)[1])
    overlap_2 = tf.minimum(tf.transpose(y_true)[2], tf.transpose(y_pred)[2])
    overlap_3 = tf.minimum(tf.transpose(y_true)[3], tf.transpose(y_pred)[3])
    intersection = (overlap_2 - overlap_0 + 1) * (overlap_3 - overlap_1 + 1)
    union = aog + aop - intersection
    iou = intersection / union
    iou = tf.keras.backend.clip(iou, 0.0 + tf.keras.backend.epsilon(), 1.0 - tf.keras.backend.epsilon())
    return iou

model.compile(
    optimizer=opt,  # the Adam optimizer from the question's compile call
    loss=[classloss, regressloss],
    metrics=[[tf.keras.metrics.BinaryAccuracy()], [iou_metric]],
)
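A quick illustration of the Accuracy-vs-BinaryAccuracy point (my own check): tf.keras.metrics.Accuracy counts exact matches, so sigmoid outputs that are never exactly 0.0 or 1.0 always score 0, while BinaryAccuracy thresholds at 0.5 first.
import tensorflow as tf
y_true = tf.constant([[1.0], [0.0]])
y_pred = tf.constant([[0.9], [0.2]])
acc = tf.keras.metrics.Accuracy()
acc.update_state(y_true, y_pred)
bacc = tf.keras.metrics.BinaryAccuracy()
bacc.update_state(y_true, y_pred)
print(acc.result().numpy(), bacc.result().numpy())  # 0.0 1.0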
I am using this example of a VAE.
The only difference I made was changing the loss from binary cross-entropy to MSE, like this:
class OptimizerVAE(object):
    def __init__(self, model, learning_rate=1e-3):
        """
        OptimizerVAE initializer
        :param model: a model object
        :param learning_rate: float, learning rate of the optimizer
        """
        # mean squared error (changed from binary cross-entropy)
        self.bce = tf.keras.losses.mse(model.x, model.logits)
        self.reconstruction_loss = tf.reduce_mean(tf.reduce_sum(self.bce, axis=-1))

        if model.distribution == 'normal':
            # KL divergence between normal approximate posterior and standard normal prior
            self.p_z = tf.distributions.Normal(tf.zeros_like(model.z), tf.ones_like(model.z))
            kl = model.q_z.kl_divergence(self.p_z)
            self.kl = tf.reduce_mean(tf.reduce_sum(kl, axis=-1)) * 0.1
        elif model.distribution == 'vmf':
            # KL divergence between vMF approximate posterior and uniform hyper-spherical prior
            self.p_z = HypersphericalUniform(model.z_dim - 1, dtype=model.x.dtype)
            kl = model.q_z.kl_divergence(self.p_z)
            self.kl = tf.reduce_mean(kl) * 0.1
        else:
            raise NotImplementedError

        self.ELBO = -self.reconstruction_loss - self.kl
        self.train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(-self.ELBO)
        self.print = {'recon loss': self.reconstruction_loss, 'ELBO': self.ELBO, 'KL': self.kl}
When running the original architecture (2 MLP layers), the model runs perfectly no matter the batch size (specified as "None" in the GitHub code).
I am trying to change this to a convolutional model, but when I change just the encoder to this:
def _encoder(self, x):
    """
    Encoder network
    :param x: placeholder for input
    :return: tuple `(z_mean, z_var)` with mean and concentration around the mean
    """
    # 2 hidden layers encoder
    # h0 = tf.layers.dense(x, units=self.h_dim * 2, activation=self.activation)
    # h1 = tf.layers.dense(h0, units=self.h_dim, activation=self.activation)
    h1 = tf.layers.conv1d(x, filters=32, kernel_size=7, activation=tf.nn.relu)
    h1 = tf.layers.conv1d(h1, filters=64, kernel_size=7, activation=tf.nn.relu)
    h1 = tf.layers.conv1d(h1, filters=64, kernel_size=7, activation=tf.nn.relu)
    h1 = tf.layers.flatten(h1)
    h1 = tf.layers.dense(h1, 32, activation=tf.nn.relu)

    if self.distribution == 'normal':
        # compute mean and std of the normal distribution
        z_mean = tf.layers.dense(h1, units=self.z_dim, activation=None, name='z_output')
        z_var = tf.layers.dense(h1, units=self.z_dim, activation=tf.nn.softplus)
    elif self.distribution == 'vmf':
        # compute mean and concentration of the von Mises-Fisher
        z_mean = tf.layers.dense(h1, units=self.z_dim, activation=lambda x: tf.nn.l2_normalize(x, axis=-1))
        # the `+ 1` prevents collapsing behaviors
        z_var = tf.layers.dense(h1, units=1, activation=tf.nn.softplus) + 1
    else:
        raise NotImplementedError

    return z_mean, z_var
and when running the model, I get the error:
InvalidArgumentError: Incompatible shapes: [32,1] vs. [32,512,1]
[[{{node gradients/SquaredDifference_grad/BroadcastGradientArgs}}]]
32 is the batch size used when running the model. What confuses me is that when I run this with batch_size = 1, the model runs!
Where is this going wrong? Is it the optimizer and the way it averages?
I solved the issue by reshaping the output from the decoder to shape (win_size, 1), since the MLP decoder fails to add that extra dimension!
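A minimal sketch of that fix in the same tf.layers style as the code above (h, win_size, and the decoder's final dense layer are my assumptions about the original architecture):
logits = tf.layers.dense(h, units=win_size, activation=None)  # h: last hidden layer of the decoder (assumed)
logits = tf.reshape(logits, (-1, win_size, 1))  # add the channel dim so logits matches x: [batch, win_size, 1]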
I created the BatchNormalization layer in Keras and in PyTorch, and computed the same operation using NumPy, but I get three different results. Am I making some error here?
Things I assume below: layer.get_weights() in tf.keras for a BN layer returns, in order: gamma, beta, running_mean, running_var. For the BN operation I am using: gamma * (x - running_mean) / sqrt(running_var + epsilon) + beta
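A tiny scalar check of that formula with toy values (my own sketch):
import numpy as np
x, gamma, beta, mean, var, eps = 2.0, 1.5, 0.1, 0.5, 4.0, 0.001
y = gamma * (x - mean) / np.sqrt(var + eps) + beta
print(y)  # ~1.2249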
Code snippet to reproduce the issue:
import torch
import tensorflow
from torch.nn import Module, BatchNorm1d, Conv1d
from torch.nn.functional import pad
import numpy as np
from tensorflow.keras.layers import Conv1D, BatchNormalization, Input
from tensorflow.keras.models import Model
torch.backends.cudnn.deterministic = True
np.random.seed(12345)
z = Input((1024, 8), dtype=np.float32)
inp = z
z = Conv1D(64, 16, padding='same', use_bias=False)(z)
z = BatchNormalization(epsilon=0.001)(z)
keras_model = Model(inp, z)
# in order: conv-layer weight, gamma, beta, running_mean, running_var
weights = [np.random.random((16, 8, 64)), np.random.random((64,)), np.random.random((64,)), np.random.random((64,)),
np.random.random((64,))]
weights = [np.array(x, dtype=np.float32) for x in weights]
keras_model.layers[1].set_weights([weights[0]])
keras_model.layers[2].set_weights(weights[1:])
keras_model_subpart = Model(keras_model.inputs, keras_model.layers[1].output)
class TorchModel(Module):
    def __init__(self):
        super(TorchModel, self).__init__()
        self.l1 = Conv1d(8, 64, 16, bias=False)
        self.l2 = BatchNorm1d(64, 0.001)  # second positional arg is eps

    def forward(self, x):
        x = pad(x, (7, 8))  # manual 'same' padding for kernel size 16
        x = self.l1(x)
        y = x
        x = self.l2(x)
        return y, x
torch_model = TorchModel().to(torch.device('cpu'))
torch_model.l1.weight.data = torch.from_numpy(weights[0].T).float()
torch_model.l2.weight.data = torch.from_numpy(weights[1].T).float()
torch_model.l2.bias.data = torch.from_numpy(weights[2]).float()
torch_model.l2.running_mean = torch.from_numpy(weights[3]).float()
torch_model.l2.running_var = torch.from_numpy(weights[4]).float()
torch_model.eval()
input_value = np.array(np.random.random((1024, 8)), dtype=np.float32)
keras_results = [np.array(keras_model_subpart.predict(input_value[np.newaxis, :, :])),
np.array(keras_model.predict(input_value[np.newaxis, :, :]))]
with torch.no_grad():
    torch_results = [x.detach().numpy() for x in torch_model(torch.from_numpy(input_value.T[np.newaxis, :, :]).float())]
keras_results = [np.squeeze(x) for x in keras_results]
torch_results = [np.squeeze(x) for x in torch_results]
numpy_results = weights[1] * (keras_results[0] - weights[3]) / np.sqrt(weights[4] + 0.001) + weights[2]
print(torch.__version__, tensorflow.__version__, np.__version__, sep=",")
print('\nRESULTS:')
print('\tLayer 1 difference:', np.mean(np.abs(keras_results[0] - torch_results[0].T).flatten()))
print('\tLayer 2 difference:', np.mean(np.abs(keras_results[1] - torch_results[1].T).flatten()))
print('\tLayer 2 keras - numpy:', np.mean(np.abs(keras_results[1] - numpy_results).flatten()))
print('\tLayer 2 torch - numpy:', np.mean(np.abs(torch_results[1] - numpy_results.T).flatten()))
The output I get (after all of TensorFlow's initialization messages):
1.7.1+cu110,2.4.1,1.19.5
RESULTS:
Layer 1 difference: 0.0
Layer 2 difference: 6.8671216e-07
Layer 2 keras - numpy: 2.291581e-06
Layer 2 torch - numpy: 1.8929532e-06
I want to make a weighted-average ensemble of 3 of my trained models. First I want to multiply the softmax output of each model (element-wise) by a weight vector, and then average the 3 weighted outputs.
I used the following code to multiply the output of the first model by its weight vector:
from keras.layers import Multiply, Average
resnet_weights = np.asarray([[0.91855, 0.99485, 0.89065, 0.96525, 0.98005,
0.93645, 0.6149, 0.934, 0.92505, 0.785, 0.85]], np.float32)
resnet_weight_tensor=tf.constant(resnet_weights, np.float32)
sess = tf.InteractiveSession()
print(resnet_weight_tensor.eval())
sess.close()
resnet_weighted = Multiply()([finetuned_model.layers[-1].output, resnet_weight_tensor])
print(resnet_weighted)
new_model=Model(model.input, resnet_weighted)
However, I'm stuck with the following error:
What can I do?
Use Lambda instead of Multiply, and K.constant instead of tf.constant (it's backend-neutral):
from keras.layers import Lambda
import keras.backend as K

resnet_weight_tensor = K.constant(resnet_weights, 'float32')
out = finetuned_model.layers[-1].output
resnet_weighted = Lambda(lambda x: x * resnet_weight_tensor)(out)
FULL EXAMPLE:
## BUILD MODELS
batch_size = 32
num_batches = 100
input_shape = (4,)
num_classes = 3
model_1 = make_model(input_shape, 8, num_classes)
model_2 = make_model(input_shape, 10, num_classes)
model_3 = make_model(input_shape, 12, num_classes)
## BUILD ENSEMBLE
models = (model_1, model_2, model_3)
models_ins = [model.input for model in models]
models_outs = [model.output for model in models]
outputs_weights = [np.random.random((batch_size, num_classes)),
np.random.random((batch_size, num_classes)),
np.random.random((batch_size, num_classes))]
outs_avg = model_outputs_average(models, outputs_weights)
final_out = Dense(num_classes, activation='softmax')(outs_avg)
model_ensemble = Model(inputs=models_ins, outputs=final_out)
model_ensemble.compile('adam', loss='categorical_crossentropy')
### TEST ENSEMBLE
x1 = np.random.randn(batch_size, *input_shape) # toy data
x2 = np.random.randn(batch_size, *input_shape)
x3 = np.random.randn(batch_size, *input_shape)
y = np.random.randint(0,2,(batch_size, num_classes)) # toy labels
model_ensemble.fit([x1,x2,x3], y)
Verify averaging:
[print(layer.name) for layer in model_ensemble.layers] # show layer names
preouts1 = get_layer_outputs(model_ensemble, 'lambda_1', [x1,x2,x3])
preouts2 = get_layer_outputs(model_ensemble, 'lambda_2', [x1,x2,x3])
preouts3 = get_layer_outputs(model_ensemble, 'lambda_3', [x1,x2,x3])
preouts_avg = get_layer_outputs(model_ensemble, 'average_1',[x1,x2,x3])
preouts = np.asarray([preouts1, preouts2, preouts3])
sum_of_diff_of_means = np.sum(np.mean(preouts, axis=0) - preouts_avg)
print(np.sum(np.mean([preouts1, preouts2, preouts3],axis=0) - preouts_avg))
# 4.69e-07
Functions used:
def make_model(input_shape, dense_dim, num_classes=3):
    ipt = Input(shape=input_shape)
    x = Dense(dense_dim, activation='relu')(ipt)
    out = Dense(num_classes, activation='softmax')(x)
    model = Model(ipt, out)
    model.compile('adam', loss='categorical_crossentropy')
    return model

def model_outputs_average(models, outputs_weights):
    outs = [model.output for model in models]
    out_shape = K.int_shape(outs[0])[1:]  # ignore batch dim
    assert all([(K.int_shape(out)[1:] == out_shape) for out in outs]), \
        "All model output shapes must match"

    outs_weights = [K.constant(w, 'float32') for w in outputs_weights]
    ow_shape = K.int_shape(outs_weights[0])
    assert all([(K.int_shape(w) == ow_shape) for w in outs_weights]), \
        "All outputs_weights and model.output shapes must match"

    # bind ow as a default argument so each Lambda keeps its own weight tensor
    weights_layers = [Lambda(lambda x, w=ow: x * w)(out) for ow, out
                      in zip(outs_weights, outs)]
    return Average()(weights_layers)

def get_layer_outputs(model, layer_name, input_data, train_mode=False):
    outputs = [layer.output for layer in model.layers if layer_name in layer.name]
    # model.inputs is a list for a multi-input model, so build a flat tensor list
    layers_fn = K.function(model.inputs + [K.learning_phase()], outputs)
    return layers_fn(input_data + [int(train_mode)])[0]
The bug is possibly caused by mixing the Keras API and the TensorFlow API, since your resnet_weight_tensor is a tensor from the TensorFlow API, while finetuned_model.layers[-1].output is the output of a Keras layer. Some discussion can be found in issue 7362.
One workaround is to wrap resnet_weight_tensor in a Keras Input layer.
from keras.layers import Multiply, Average, Input
resnet_weights = np.asarray([[0.91855, 0.99485, 0.89065, 0.96525, 0.98005,
0.93645, 0.6149, 0.934, 0.92505, 0.785, 0.85]], np.float32)
resnet_weight_tensor=tf.constant(resnet_weights, np.float32)
resnet_weight_input = Input(tensor=resnet_weight_tensor)
sess = tf.InteractiveSession()
print(resnet_weight_tensor.eval())
sess.close()
resnet_weighted = Multiply()([finetuned_model.layers[-1].output, resnet_weight_input])
print(resnet_weighted)
new_model=Model([model.input, resnet_weight_input], resnet_weighted)
Trying to implement this article.
Edit 1: found one error; my output size is 10 and not 1 (one number per sentence, and there are 10 sentences per document).
Edit 2: I got another error that involves the batch size. When I make it 10, the model trains (!!!!), but I think that's not the right way... The error I get with batch size 3 is:
Edit 3: Solved! It was issues with sizes, plus the fact that Bidirectional returns different outputs than LSTM, so I need to do the concatenation myself (see the sketch after the error below). I will put the working code in an answer.
InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: Incompatible shapes: [10] vs. [3]
[[{{node training_5/Adam/gradients/loss_8/dense_61_loss/mul_grad/BroadcastGradientArgs}}]]
[[metrics_8/acc/Mean_1/_5481]]
(1) Invalid argument: Incompatible shapes: [10] vs. [3]
[[{{node training_5/Adam/gradients/loss_8/dense_61_loss/mul_grad/BroadcastGradientArgs}}]]
0 successful operations.
0 derived errors ignored.
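Regarding Edit 3, a minimal sketch (shapes assumed from the code below) of the Bidirectional point: with return_state=True, Bidirectional(LSTM(...)) returns five tensors rather than the three an LSTM returns, so the forward and backward states must be concatenated manually:
from keras.layers import Input, LSTM, Bidirectional, Concatenate
x = Input(shape=(50, 100))  # (words_in_sentence, word_embeddings_size)
seq, fh, fc, bh, bc = Bidirectional(LSTM(200, return_sequences=True, return_state=True))(x)
state_h = Concatenate()([fh, bh])  # (None, 400)
state_c = Concatenate()([fc, bc])  # (None, 400)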
The target is extractive document summarization.
Link to colab with code
What they do (see the picture on page 3) is:
1. 100 units of Bi-LSTM + attention for each sentence of the document.
2. Concatenate those and feed them into one Bi-LSTM + attention to get document embeddings.
3. Use the document embeddings + hidden states from the LSTM to get some features.
4. Classify according to the features.
After a lot of struggle with the Keras low-level API, I got a simple version to work: either take precomputed sentence embeddings and run just the last LSTM, or take the word embeddings of one sentence and make the small per-sentence LSTM unit work. Now I am trying to put everything together but cannot get the sizes to fit.
My input size is number_of_documents * sentences_in_document * words_in_sentence * word_embedding; in the code I set those to 20 * 10 * 50 * 100 (10 sentences per document so that everything runs faster for now...).
My output is 10 * 1, meaning for each sentence I get 1/0 for whether it is part of the document summary. (I have not yet done the feature-extraction part; I just use another dense layer to give me probabilities instead...)
I think the problem is with this part of the code:
X_doc = Lambda(lambda x: x[:, t, :, :])(X)
The code with sample data:
from keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply
from keras.layers import RepeatVector, Dense, Activation, Lambda
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.models import load_model, Model
import keras.backend as K
import numpy as np
import keras
import random
from tqdm import tqdm
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
num_of_training_examples = 20
words_in_sentence = 50 # max words per sentence
sentences_in_doc = 10
model_output_size = 10
word_embeddings_size = 100
lstm_hidden_size = 200
densor1_output_size = 400
densor2_output_size = 400
x_train = np.random.rand(num_of_training_examples, sentences_in_doc, words_in_sentence, word_embeddings_size)
y_train= np.random.randint(2, size=(num_of_training_examples, sentences_in_doc))
print(x_train.shape)
print(y_train.shape)
# Initialize arrays
inputs = []
bi_lstms = []
densors_1 = []
densors_2 = []
for i in range(sentences_in_doc):
    bi_lstms.append(Bidirectional(LSTM(units=lstm_hidden_size, input_shape=(words_in_sentence, word_embeddings_size),
                                       return_sequences=False, name='bidirectional_' + str(i)), merge_mode='concat'))
    densors_1.append(Dense(densor1_output_size, activation="tanh"))
    densors_2.append(Dense(densor2_output_size, activation="softmax"))

def invoke_sentence(sentence_matrix, index):
    if index == 0:
        print(type(sentence_matrix))
        print(tf.shape(sentence_matrix))

    Ys = bi_lstms[index](sentence_matrix)
    attention_middle = densors_1[index](Ys)
    output = densors_2[index](attention_middle)

    if index == 0:
        print(f'Ys shape is {Ys.shape}')
        print(f'attention_middle shape is {attention_middle.shape}')
        print(f'output shape is {output.shape}')

    return output
def model(words_in_sentence, sentences_in_doc, lstm_hidden_size, word_embeddings_size, model_output_size):
    """
    Arguments:
    words_in_sentence -- Tx -- length of the input sequence - max words per sentence
    sentences_in_doc -- Ty -- length of the output sequence - number of sentences per document
    lstm_hidden_size -- hidden state size of the Bi-LSTM
    word_embeddings_size -- size of the word embeddings
    model_output_size -- size of each sentence label (1 or 0)
    Returns:
    model -- Keras model instance
    """
    sentences_embeddings = []
    X = Input(shape=(sentences_in_doc, words_in_sentence, word_embeddings_size), name='X')
    for t in range(sentences_in_doc):  # Ty
        X_doc = Lambda(lambda x: x[:, t, :, :])(X)
        print(type(X_doc))
        print(X_doc)
        print(X_doc.shape)
        sentences_embeddings.append(invoke_sentence(X_doc, t))

    sentences_embeddings_stacked = Lambda(lambda x: tf.stack(x, axis=0))(sentences_embeddings)
    Ys = Bidirectional(LSTM(units=lstm_hidden_size, input_shape=(sentences_in_doc, lstm_hidden_size*2),
                            return_sequences=False, name='bidirectional_document'),
                       merge_mode='concat')(sentences_embeddings_stacked)
    attention_middle = Dense(densor1_output_size, activation="tanh")(Ys)
    document_embedding = Dense(densor2_output_size, activation="softmax")(attention_middle)
    outputs = Dense(model_output_size, activation="softmax")(document_embedding)
    # compute_features(document_embeddings, sentences_embeddings, ys)
    model = Model(inputs=X, outputs=outputs)
    return model

model = model(words_in_sentence, sentences_in_doc, lstm_hidden_size, word_embeddings_size, model_output_size)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x=x_train, y=y_train, batch_size=2, epochs=1)
# Sizes
num_of_training_examples = 20
words_in_sentence = 50 # max words per sentence
sentences_in_doc = 10
model_output_size = 10
word_embeddings_size = 100
lstm_hidden_size = 200
densor1_output_size = 400
densor2_output_size = 400
# Data
x_train = np.random.rand(num_of_training_examples, sentences_in_doc, words_in_sentence, word_embeddings_size)
y_train= np.random.randint(2, size=(num_of_training_examples, sentences_in_doc))
print(x_train.shape)
print(y_train.shape)
# Initialize arrays
inputs = []
bi_lstms = []
densors_1 = []
densors_2 = []
for i in range(sentences_in_doc):
    bi_lstms.append(Bidirectional(LSTM(units=lstm_hidden_size, input_shape=(words_in_sentence, word_embeddings_size),
                                       return_sequences=True, return_state=True, name='bidirectional_' + str(i))))
    densors_1.append(Dense(densor1_output_size, activation="tanh", name='sentence_dense_tanh' + str(i)))
    densors_2.append(Dense(densor2_output_size, activation="softmax", name='sentence_dense_softmax' + str(i)))

def invoke_sentence(sentence_matrix, index):
    if index == 0:
        print(type(sentence_matrix))
        print(tf.shape(sentence_matrix))

    # Bidirectional with return_state=True returns five tensors:
    # the sequence output plus the forward/backward h and c states
    lstm, forward_h, forward_c, backward_h, backward_c = bi_lstms[index](sentence_matrix)
    state_h = Concatenate()([forward_h, backward_h])
    state_c = Concatenate()([forward_c, backward_c])
    attention_middle = densors_1[index](state_h)
    output = densors_2[index](attention_middle)

    if index == 0:
        print(f'lstm shape is {lstm.shape}')
        print(f'state_h shape is {state_h.shape}')
        print(f'state_c shape is {state_c.shape}')
        print(f'attention_middle shape is {attention_middle.shape}')
        print(f'output shape is {output.shape}')

    return output
def model(words_in_sentence, sentences_in_doc, lstm_hidden_size, word_embeddings_size, model_output_size):
    """
    Arguments:
    words_in_sentence -- Tx -- length of the input sequence - max words per sentence
    sentences_in_doc -- Ty -- length of the output sequence - number of sentences per document
    lstm_hidden_size -- hidden state size of the Bi-LSTM
    word_embeddings_size -- size of the word embeddings
    model_output_size -- size of each sentence label (1 or 0)
    Returns:
    model -- Keras model instance
    """
    sentences_embeddings = []
    X = Input(shape=(sentences_in_doc, words_in_sentence, word_embeddings_size), name='X')
    for t in range(sentences_in_doc):
        X_doc = Lambda(lambda x: x[:, t, :, :])(X)
        if t == 0:
            print("X_doc")
            print(type(X_doc))
            print(X_doc)
            print(X_doc.shape)
        sentence_embedding = invoke_sentence(X_doc, t)
        sentences_embeddings.append(sentence_embedding)
        if t == 0:
            print("sentence_embedding")
            print(type(sentence_embedding))
            print(sentence_embedding)
            print(sentence_embedding.shape)

    # stack on axis=1 so the result is (batch, sentences_in_doc, 2*lstm_hidden_size)
    sentences_embeddings_stacked = Lambda(lambda x: tf.stack(x, axis=1))(sentences_embeddings)
    print("sentences_embeddings_stacked")
    print(type(sentences_embeddings_stacked))
    print(sentences_embeddings_stacked)
    print(sentences_embeddings_stacked.shape)

    doc_lstm, doc_forward_h, doc_forward_c, doc_backward_h, doc_backward_c = Bidirectional(
        LSTM(units=lstm_hidden_size, input_shape=(sentences_in_doc, lstm_hidden_size*2),
             return_sequences=True, return_state=True, name='bidirectional_document'),
        merge_mode='concat')(sentences_embeddings_stacked)
    doc_state_h = Concatenate()([doc_forward_h, doc_backward_h])
    doc_state_c = Concatenate()([doc_forward_c, doc_backward_c])
    print(f'doc_lstm shape is {doc_lstm.shape}')
    print(f'doc_state_h shape is {doc_state_h.shape}')
    print(f'doc_state_c shape is {doc_state_c.shape}')

    attention_middle = Dense(densor1_output_size, activation="tanh")(doc_state_h)
    document_embedding = Dense(densor2_output_size, activation="softmax")(attention_middle)
    print(f'document_embedding shape is {document_embedding.shape}')

    # my_layer = MyLayer(input_shape=((400), (10,400), (10,400)), output_dim=2)
    # custom_output = my_layer([document_embedding, sentences_embeddings_stacked, doc_state_h])
    # print(f'custom_output shape is {custom_output.shape}')

    outputs = Dense(model_output_size, activation="softmax")(document_embedding)
    model = Model(inputs=X, outputs=outputs)
    return model

model = model(words_in_sentence, sentences_in_doc, lstm_hidden_size, word_embeddings_size, model_output_size)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x=x_train, y=y_train, batch_size=5, epochs=1)
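For reference, the two changes relative to the question's code that make the sizes fit: tf.stack(x, axis=1) stacks the sentence embeddings into shape (batch, sentences_in_doc, 2*lstm_hidden_size), whereas axis=0 produced (sentences_in_doc, batch, ...) and scrambled the batch dimension; and since Bidirectional(LSTM(..., return_state=True)) returns five tensors, the forward/backward states are concatenated manually before the dense layers.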