Tensorflow / Keras, trouble with Embedding and sparse_categorical_crossentropy

Keras noob here,
I'm trying to build an LSTM network to generate text using Shakespeare's works. (A bit like in this tutorial)
This is the method that generates my model:
def generate_model(seq_len=100, stateful=True):
    # Initialize model
    source = tf.keras.Input(
        name='seed', shape=(seq_len,), dtype=tf.int32)
    # Embed ascii character (0 to 255) into one hot encoding (0, 1, 0...)
    embedding = tf.keras.layers.Embedding(input_dim=256, output_dim=EMBEDDING_DIM, input_length=seq_len)(source)
    # Good old LSTM's
    lstm_1 = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(embedding)
    lstm_2 = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(lstm_1)
    # I honestly don't understand what the TimeDistributed method does
    predicted_char = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(256, activation='softmax'))(lstm_2)
    model = tf.keras.Model(inputs=[source], outputs=[predicted_char])
    model.compile(
        optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
        loss='categorical_crossentropy',
        metrics=['categorical_accuracy'])
    return model
I thought the Embedding layer was responsible for one-hot encoding all my characters into vectors. If this is true, is that structure not preserved while passing through the LSTM layers? I'm a bit confused.
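For reference, this is a standalone shape check I ran separately from my model (not part of the training code):
import tensorflow as tf

emb = tf.keras.layers.Embedding(input_dim=256, output_dim=512)
out = emb(tf.constant([[65, 76, 76, 83]]))  # "ALLS" as character codes, input shape (1, 4)
print(out.shape)  # (1, 4, 512)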
For reference, this is an example of an input:
[65 76 76 83 32 87 69 76 76 32]
(before encoding it would be)
['A', 'L', 'L', 'S', ' ', 'W', 'E', 'L', 'L', ' ']
The corresponding label is the next character in the sequence:
[84] ie ['T']
I'm struggling with what appears to be a common error among newcomers:
tensorflow.python.framework.errors_impl.InvalidArgumentError: logits and labels must have the same first dimension, got logits shape [100,256] and labels shape [1]
[[{{node loss/time_distributed_loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits}}]]
[[{{node training/TFOptimizer/gradients/embedding/embedding_lookup_grad/Reshape}}]]
From the research I've done, it would appear that this has to do with my use of sparse_categorical_crossentropy; however, if I use categorical_crossentropy I get the following error during training:
ValueError: You are passing a target array of shape (5524624, 1) while using as loss `categorical_crossentropy`. `categorical_crossentropy` expects targets to be binary matrices (1s and 0s) of shape (samples, classes). If your targets are integer classes, you can convert them to the expected format via:
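The message is cut off there, but I assume it refers to something like tf.keras.utils.to_categorical (labels below just stands for my integer targets):
labels_onehot = tf.keras.utils.to_categorical(labels, num_classes=256)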
I must be missing something; how can I get my model to train?
Thank you, I would really appreciate any help :))
Edit:
If it could be useful, this is all of my code:
import numpy as np
import tensorflow as tf

SHAKESPEARE_TXT = 'shakespeare.txt'

with open(SHAKESPEARE_TXT, 'r', encoding="utf8") as f:
    raw = f.read()

def encode(txt):
    # drop any non-ascii characters
    output = np.asarray([ord(c) for c in txt if ord(c) < 255 and c != '\r'], dtype=np.int32)
    return output

def decode(txt):
    return [chr(c) for c in txt]

def get_training_data(seq_len, txt=raw):
    source = encode(txt)
    x, y = [], []
    n = len(source) - seq_len
    # n = 100
    for i in range(n):
        sequence = source[i: i + seq_len]
        x.append(sequence)
        y.append([source[i + seq_len]])
    return np.asarray(x), np.asarray(y)

# txt = encode(raw)
# print(decode(txt[0:100]))

'''
training_data = get_training_data(seq_len=10)
for i in range(10):
    print(decode(training_data[0][i]), decode(training_data[1][i]))
'''

EMBEDDING_DIM = 512

def generate_model(seq_len=100, stateful=True):
    # Initialize model
    source = tf.keras.Input(
        name='seed', shape=(seq_len,), dtype=tf.int32)
    # Embed ascii character (0 to 255) into one hot encoding (0, 1, 0...)
    embedding = tf.keras.layers.Embedding(input_dim=256, output_dim=EMBEDDING_DIM, input_length=seq_len)(source)
    # Good old LSTM's
    lstm_1 = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(embedding)
    lstm_2 = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(lstm_1)
    # I honestly don't understand what the TimeDistributed method does
    predicted_char = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(256, activation='softmax'))(lstm_2)
    model = tf.keras.Model(inputs=[source], outputs=[predicted_char])
    model.compile(
        optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
        loss='categorical_crossentropy',
        metrics=['categorical_accuracy'])
    return model

def train():
    tf.keras.backend.clear_session()

    print("Creating model")
    training_model = generate_model(seq_len=100, stateful=False)
    '''
    tpu_model = tf.contrib.tpu.keras_to_tpu_model(
        training_model,
        strategy=tf.contrib.tpu.TPUDistributionStrategy(
            tf.contrib.cluster_resolver.TPUClusterResolver(TPU_WORKER)))
    '''

    print("Training")
    data = get_training_data(seq_len=100)
    '''
    print(data[0].shape)
    print(data[0][0])
    print(data[1].shape)
    print(data[1][0])
    '''

    # Start training
    training_model.fit(
        x=data[0],
        y=data[1],
        batch_size=1,
        # steps_per_epoch=100,
        epochs=2
    )

    print("Saving")
    training_model.save_weights('/tmp/bard.h5', overwrite=True)

train()


Unexpected input data type. Actual: (tensor(double)) , expected: (tensor(float))

I am learning the new ONNX framework that allows us to deploy deep learning (and other) models into production.
However, there is one thing I am missing. I thought the main reason for having such a framework is inference: e.g. when we have a trained model and want to use it in a different venv (where, for example, we cannot have PyTorch), the model can still be used.
I have prepared a "from scratch" example here:
# Modules
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
import torchvision
import onnx
import onnxruntime
import matplotlib.pyplot as plt
import numpy as np

# %config Completer.use_jedi = False

# MNIST Example dataset
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST(
        'data', train=True, download=True,
        transform=torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])),
    batch_size=800)

# Take data and labels "by hand"
inputs_batch, labels_batch = next(iter(train_loader))

# Simple Model
class CNN(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=10, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.fc1 = nn.Linear(16*7*7, num_classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)
        return x

# Training settings
device = 'cpu'
batch_size = 64
learning_rate = 0.001
n_epochs = 10

# Dataset prep
dataset = TensorDataset(inputs_batch, labels_batch)
TRAIN_DF = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)

# Model Init
model = CNN(in_channels=1, num_classes=10)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training Loop
for epoch in range(n_epochs):
    for data, labels in TRAIN_DF:
        model.train()
        # Send data to device
        data = data.to(device)
        # Send labels to device
        labels = labels.to(device)
        # data = data.reshape(data.shape[0], -1)
        # Forward
        pred = model(data)
        loss = F.cross_entropy(pred, labels)
        # Backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# Check Accuracy
def check_accuracy(loader, model):
    num_correct = 0
    num_total = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            # x = x.reshape(x.shape[0], -1)
            scores = model(x)
            _, pred = scores.max(1)
            num_correct += (pred == y).sum()
            num_total += pred.size(0)
    print(f"Got {num_correct} / {num_total} with accuracy {float(num_correct)/float(num_total)*100: .2f}")

check_accuracy(TRAIN_DF, model)

# Inference with ONNX
# Create artificial data of the same size
img_size = 28
dummy_data = torch.randn(1, img_size, img_size)
dummy_input = torch.autograd.Variable(dummy_data).unsqueeze(0)

input_name = "input"
output_name = "output"

model_eval = model.eval()
torch.onnx.export(
    model_eval,
    dummy_input,
    "model_CNN.onnx",
    input_names=["input"],
    output_names=["output"],
)

# Take a random image from the training data
X_pred = data[4].unsqueeze(0)

# Convert the Tensor image to PURE numpy and pretend we are working in a venv
# where we only have numpy - NO PYTORCH
X_pred_np = X_pred.numpy()
X_pred_np = np.array(X_pred_np)

IMG_Rando = np.random.rand(1, 1, 28, 28)

np.shape(X_pred_np) == np.shape(IMG_Rando)

ort_session = onnxruntime.InferenceSession(
    "model_CNN.onnx"
)

def to_numpy(tensor):
    return (
        tensor.detach().cpu().numpy()
        if tensor.requires_grad
        else tensor.cpu().numpy()
    )

# compute ONNX Runtime output prediction
# WORKS
# ort_inputs = {ort_session.get_inputs()[0].name: X_pred_np}
# DOES NOT WORK
ort_inputs = {ort_session.get_inputs()[0].name: IMG_Rando}
# WORKS
# ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(X_pred)}

ort_outs = ort_session.run(None, ort_inputs)
ort_outs
Firstly, we create a simple model and train it on the MNIST dataset.
Then we export the trained model using the ONNX framework.
Now, when I want to classify an image using X_pred_np, it works even though it is "pure" NumPy, which is what I want.
However, I suspect this particular case only works because it was derived from a PyTorch tensor object, and thus "under the hood" it still carries PyTorch attributes.
When I try to run inference on the random "pure" NumPy object IMG_Rando, there seems to be a problem:
Unexpected input data type. Actual: (tensor(double)) , expected: (tensor(float)).
suggesting that the PyTorch form is needed.
Is there a way to use only NumPy images for the ONNX predictions, so that inference can be performed in a separate venv where no PyTorch is installed?
Secondly, is there a way for ONNX to remember the actual classes?
In this particular case, the index corresponds to the label of the image. However, in animal classification, ONNX would not give us labels like "DOG" and "CAT"; it would only give us the index of the predicted label, which we would need to run through our own "prediction dictionary" to know that, say, the fifth label is associated with "cat" and the sixth with "dog", etc.
Numpy defaults to float64 while pytorch defaults to float32. Cast the input to float32 before the inference:
IMG_Rando = np.random.rand(1, 1, 28, 28).astype(np.float32)
double is short for double-precision floating-point format, a 64-bit floating-point representation, while float refers to a 32-bit floating-point number.
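If in doubt, you can also check what the exported model expects directly from ONNX Runtime (a quick illustrative check using the session from the question):
inp = ort_session.get_inputs()[0]
print(inp.name, inp.shape, inp.type)  # e.g. input [1, 1, 28, 28] tensor(float)
print(IMG_Rando.dtype)                # float64 until cast with .astype(np.float32)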
As an improvement to the accepted answer, the idiomatic way to generate random numbers in Numpy is now by using a Generator. This offers the benefit of being able to create the array in the right type directly, rather than using the expensive astype operation, which copies the array (as in the accepted answer). Thus, the improved solution would look like:
rng = np.random.default_rng() # set seed if desired
IMG_Rando = rng.random((1, 1, 28, 28), dtype=np.float32)

Custom Attention Layer in Keras

I want to create a custom attention layer such that, for the input at any time step, the layer returns the weighted mean of the inputs at all time steps.
For example, I want an input tensor with shape [32, 100, 2048] to go into the layer and come out with shape [32, 100, 2048]. I wrote the layer as follows:
import tensorflow as tf
from keras.layers import Layer, Dense
# or
from tensorflow.keras.layers import Layer, Dense

class Attention(Layer):
    def __init__(self, units_att):
        self.units_att = units_att
        self.W = Dense(units_att)
        self.V = Dense(1)
        super().__init__()

    def __call__(self, values):
        t = tf.constant(0, dtype=tf.int32)
        time_steps = tf.shape(values)[1]
        initial_outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
        initial_att = tf.TensorArray(dtype=tf.float32, size=time_steps)

        def should_continue(t, *args):
            return t < time_steps

        def iteration(t, values, outputs, atts):
            score = self.V(tf.nn.tanh(self.W(values)))
            # attention_weights shape == (batch_size, time_step, 1)
            attention_weights = tf.nn.softmax(score, axis=1)
            # context_vector shape after sum == (batch_size, hidden_size)
            context_vector = attention_weights * values
            context_vector = tf.reduce_sum(context_vector, axis=1)
            outputs = outputs.write(t, context_vector)
            atts = atts.write(t, attention_weights)
            return t + 1, values, outputs, atts

        t, values, outputs, atts = tf.while_loop(should_continue, iteration,
                                                 [t, values, initial_outputs, initial_att])
        outputs = outputs.stack()
        outputs = tf.transpose(outputs, [1, 0, 2])
        atts = atts.stack()
        atts = tf.squeeze(atts, -1)
        atts = tf.transpose(atts, [1, 0, 2])
        return t, values, outputs, atts
For input = tf.constant(2, shape=[32, 100, 2048], dtype=tf.float32) I get output with shape [32, 100, 2048] in tf2 and [32, None, 2048] in tf1.
For input = Input(shape=(None, 2048)) I get output with shape [None, None, 2048] in tf1, and in tf2 I get the error
TypeError: 'Tensor' object cannot be interpreted as an integer
Finally, in both cases, I can't use this layer in my model because my model input is Input(shape=(None, 2048)) and I get the error
AttributeError: 'NoneType' object has no attribute '_inbound_nodes'
in tf1; in tf2 I get the same error as above. I build my model with the Keras functional API.
From the code you have shared, it looks like you want to implement Bahdanau's attention layer in your code. You want to attend to all the 'values' (the previous layer's output - all its hidden states), and your 'query' would be the last hidden state of the decoder. Your code should actually be very simple and should look like:
class Bahdanau(tf.keras.layers.Layer):
    def __init__(self, n):
        super(Bahdanau, self).__init__()
        self.w = tf.keras.layers.Dense(n)
        self.u = tf.keras.layers.Dense(n)
        self.v = tf.keras.layers.Dense(1)

    def call(self, query, values):
        query = tf.expand_dims(query, 1)
        e = self.v(tf.nn.tanh(self.w(query) + self.u(values)))
        a = tf.nn.softmax(e, axis=1)
        c = a * values
        c = tf.reduce_sum(c, axis=1)
        return a, c

## Say we want 10 units in the single-layer MLP determining w, u
attentionlayer = Bahdanau(10)

## Call with i/p: decoder state at t-1 and all encoder hidden states
a, c = attentionlayer(stminus1, hj)
We are not specifying the tensor shape anywhere in the code. This code will return a context tensor of the same size as 'stminus1', which is the 'query'. It does this after attending to all the 'values' (all output states of the encoder) using Bahdanau's attention mechanism.
So assuming your batch size is 32, timesteps=100 and embedding dimension=2048, the shape of stminus1 should be (32, 2048) and the shape of hj should be (32, 100, 2048). The shape of the output context would be (32, 2048). We also return the 100 attention weights in case you want to route them to a nice display.
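As a quick sanity check of those shapes (illustrative only, using random tensors with the Bahdanau layer above):
import tensorflow as tf

stminus1 = tf.random.normal((32, 2048))   # decoder state at t-1 (the 'query')
hj = tf.random.normal((32, 100, 2048))    # all encoder hidden states (the 'values')

attentionlayer = Bahdanau(10)
a, c = attentionlayer(stminus1, hj)
print(a.shape)  # (32, 100, 1) -> one attention weight per timestep
print(c.shape)  # (32, 2048)   -> context vector, same size as the query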
This is the simplest version of 'Attention'. If you have any other intent, please let me know and I will reformat my answer. For more specific details, please refer to https://towardsdatascience.com/create-your-own-custom-attention-layer-understand-all-flavours-2201b5e8be9e

Generating Input for LSTM from universal sentence encoder output

I am working on a multi-class classification problem using an LSTM and embeddings obtained from the Universal Sentence Encoder.
Previously I was using GloVe embeddings, and I got the required input shape for the LSTM (batch_size, timesteps, input_dim). I am planning to use the Universal Sentence Encoder, but I found that its output is 2D, [batch, feature]. How can I make the required changes?
LSTM + Universal Sentence Encoder
EMBED_SIZE = 512

module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3"
embed = hub.Module(module_url)

def UniversalEmbedding(x):
    return embed(tf.squeeze(tf.cast(x, tf.string)),
                 signature="default", as_dict=True)["default"]

seq_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
print("seq i", seq_input.shape, seq_input)
embedded_seq = Lambda(UniversalEmbedding,
                      output_shape=(EMBED_SIZE,))(seq_input)
print("EMD SEQ", embedded_seq.shape, type(embedded_seq))

# (timesteps, n_features) (,MAX_SEQUENCE_LENGTH, EMBED_SIZE) (,150,512)
x_1 = LSTM(units=NUM_LSTM_UNITS,
           name='blstm_1',
           dropout=DROP_RATE_LSTM)(embedded_seq)
print(x_1)
This produces the following error:
seq i (?, 150) Tensor("input_8:0", shape=(?, 150), dtype=int32)
INFO:tensorflow:Saver not created because there are no variables in the graph to restore
I0529 07:24:32.504808 140127577749376 saver.py:1483] Saver not created because there are no variables in the graph to restore
EMD SEQ (?, 512) <class 'tensorflow.python.framework.ops.Tensor'>
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-34-ea634319205b> in <module>()
12 x_1 = LSTM(units=NUM_LSTM_UNITS,
13 name='blstm_1',
---> 14 dropout=DROP_RATE_LSTM)(embedded_seq)
15 print(x_1)
16
2 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in assert_input_compatibility(self, inputs)
309 self.name + ': expected ndim=' +
310 str(spec.ndim) + ', found ndim=' +
--> 311 str(K.ndim(x)))
312 if spec.max_ndim is not None:
313 ndim = K.ndim(x)
ValueError: Input 0 is incompatible with layer blstm_1: expected ndim=3, found ndim=2
LSTM + Glove embeddings
embedding_layer = Embedding(nb_words,
                            EMBED_SIZE,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

seq_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
print("SEQ INP", seq_input, seq_input.shape)
embedded_seq = embedding_layer(seq_input)
print("EMD SEQ", embedded_seq.shape)

# Bi-directional LSTM # (timesteps, n_features)
x_1 = Bidirectional(LSTM(units=NUM_LSTM_UNITS,
                         name='blstm_1',
                         dropout=DROP_RATE_LSTM,
                         recurrent_dropout=DROP_RATE_LSTM),
                    merge_mode='concat')(embedded_seq)
x_1 = Dropout(DROP_RATE_DENSE)(x_1)
x_1 = Dense(NUM_DENSE_UNITS, activation='relu')(x_1)
x_1 = Dropout(DROP_RATE_DENSE)(x_1)
OUTPUT (This works properly with LSTM)
SEQ INP Tensor("input_2:0", shape=(?, 150), dtype=int32) (?, 150)
EMD SEQ (?, 150, 300)
The Universal Sentence Encoder is different from word2vec or GloVe; it does not produce word-level embeddings:
The model is trained and optimized for greater-than-word length text,
such as sentences, phrases or short paragraphs. It is trained on a
variety of data sources and a variety of tasks with the aim of
dynamically accommodating a wide variety of natural language
understanding tasks. The input is variable length English text and the
output is a 512 dimensional vector. We apply this model to the STS
benchmark for semantic similarity, and the results can be seen in the
example notebook made available. The universal-sentence-encoder model
is trained with a deep averaging network (DAN) encoder.
The example above, where they used a Lambda function, is for a feed-forward neural network, where the input to the next layer is 2D, unlike an RNN or CNN (which expects 3D input).
In short, what you have to do is prepare (embed) your text beforehand and then feed the result to your network:
def process_text(sentences_list):
    path = './processed_data'
    embeddings_file = "embeddings-{}.pickle".format(len(sentences_list))
    if not os.path.isfile(join(path, embeddings_file)):
        module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3"
        embed = hub.Module(module_url)
        with tf.Session() as sess:
            sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
            sentences_list = sess.run(embed(sentences_list))
        sentences_list = np.array(sentences_list)
        sentences_list = np.array([np.reshape(embedding, (len(embedding), 1)) for embedding in sentences_list])
        pickle.dump(sentences_list, open(embeddings_file, 'wb'))
    else:
        sentences_list = pickle.load(open(join(path, embeddings_file), 'rb'))
    return sentences_list
I recommend saving the generated embeddings, as I do in the example, because retrieving them takes quite some time.
Source: Sentiment Analysis on Twitter Data using Universal Sentence Encoder
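Beyond that source, one possible way to consume the pre-computed vectors (a sketch only, not from the linked post; it assumes NUM_LSTM_UNITS, a num_classes of your choosing, and an array of integer labels): since each sentence becomes a (512, 1) array, the LSTM sees 512 timesteps with a single feature each.
from keras.layers import Input, LSTM, Dense
from keras.models import Model

embeddings = process_text(sentences_list)   # shape: (num_sentences, 512, 1)

inp = Input(shape=(EMBED_SIZE, 1))          # 512 "timesteps", 1 feature each
x = LSTM(units=NUM_LSTM_UNITS)(inp)
out = Dense(num_classes, activation='softmax')(x)

model = Model(inp, out)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(embeddings, labels, batch_size=32, epochs=5)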

Getting error while adding embedding layer to lstm autoencoder

I have a seq2seq model which is working fine. I want to add an embedding layer to this network, but I ran into an error.
This is my architecture using pretrained word embeddings, which works fine (actually the code is almost the same as the code available here, but I want to include the Embedding layer in the model rather than using the pretrained embedding vectors):
LATENT_SIZE = 20

inputs = Input(shape=(SEQUENCE_LEN, EMBED_SIZE), name="input")
encoded = Bidirectional(LSTM(LATENT_SIZE), merge_mode="sum", name="encoder_lstm")(inputs)
encoded = Lambda(rev_ent)(encoded)
decoded = RepeatVector(SEQUENCE_LEN, name="repeater")(encoded)
decoded = Bidirectional(LSTM(EMBED_SIZE, return_sequences=True), merge_mode="sum", name="decoder_lstm")(decoded)
autoencoder = Model(inputs, decoded)
autoencoder.compile(optimizer="sgd", loss='mse')
autoencoder.summary()

NUM_EPOCHS = 1
num_train_steps = len(Xtrain) // BATCH_SIZE
num_test_steps = len(Xtest) // BATCH_SIZE

checkpoint = ModelCheckpoint(filepath=os.path.join('Data/', "simple_ae_to_compare"), save_best_only=True)
history = autoencoder.fit_generator(train_gen, steps_per_epoch=num_train_steps, epochs=NUM_EPOCHS, validation_data=test_gen, validation_steps=num_test_steps, callbacks=[checkpoint])
This is the summary:
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) (None, 45, 50) 0
_________________________________________________________________
encoder_lstm (Bidirectional) (None, 20) 11360
_________________________________________________________________
lambda_1 (Lambda) (512, 20) 0
_________________________________________________________________
repeater (RepeatVector) (512, 45, 20) 0
_________________________________________________________________
decoder_lstm (Bidirectional) (512, 45, 50) 28400
When I change the code to add the embedding layer like this:
inputs = Input(shape=(SEQUENCE_LEN,), name="input")
embedding = Embedding(output_dim=EMBED_SIZE, input_dim=VOCAB_SIZE, input_length=SEQUENCE_LEN, trainable=True)(inputs)
encoded = Bidirectional(LSTM(LATENT_SIZE), merge_mode="sum", name="encoder_lstm")(embedding)
I received this error:
expected decoder_lstm to have 3 dimensions, but got array with shape (512, 45)
So my question is: what is wrong with my model?
Update
So, this error is raised in the training phase. I also checked the dimension of the data being fed to the model; it is (61598, 45), which clearly does not have the feature dimension (here, EMBED_SIZE).
But why is this error raised in the decoder part? In the encoder part I have included the Embedding layer, so it is totally fine; but when it reaches the decoder part, which does not have an embedding layer, the data cannot be correctly reshaped to three dimensions.
Now the question is why this does not happen in similar code.
This is my view, correct me if I'm wrong: seq2seq code is usually used for translation or summarization, and in those codes the decoder part also has an input (in the translation case, the other language is input to the decoder), so the idea of having an embedding in the decoder part makes sense.
Finally, here I do not have a separate input, which is why I do not need any separate embedding in the decoder part. However, I don't know how to fix the problem; I just know why it is happening :|
Update2
This is the data being fed to the model:
sent_wids = np.zeros((len(parsed_sentences), SEQUENCE_LEN), 'int32')
sample_seq_weights = np.zeros((len(parsed_sentences), SEQUENCE_LEN), 'float')
for index_sentence in range(len(parsed_sentences)):
    temp_sentence = parsed_sentences[index_sentence]
    temp_words = nltk.word_tokenize(temp_sentence)
    for index_word in range(SEQUENCE_LEN):
        if index_word < sent_lens[index_sentence]:
            sent_wids[index_sentence, index_word] = lookup_word2id(temp_words[index_word])
        else:
            sent_wids[index_sentence, index_word] = lookup_word2id('PAD')

def sentence_generator(X, embeddings, batch_size, sample_weights):
    while True:
        # loop once per epoch
        num_recs = X.shape[0]
        indices = np.random.permutation(np.arange(num_recs))
        # print(embeddings.shape)
        num_batches = num_recs // batch_size
        for bid in range(num_batches):
            sids = indices[bid * batch_size : (bid + 1) * batch_size]
            temp_sents = X[sids, :]
            Xbatch = embeddings[temp_sents]
            weights = sample_weights[sids, :]
            yield Xbatch, Xbatch

LATENT_SIZE = 60

train_size = 0.95
split_index = int(math.ceil(len(sent_wids) * train_size))
Xtrain = sent_wids[0:split_index, :]
Xtest = sent_wids[split_index:, :]
train_w = sample_seq_weights[0:split_index, :]
test_w = sample_seq_weights[split_index:, :]

train_gen = sentence_generator(Xtrain, embeddings, BATCH_SIZE, train_w)
test_gen = sentence_generator(Xtest, embeddings, BATCH_SIZE, test_w)
parsed_sentences contains 61,598 sentences, which are padded.
Also, this is the layer I have in the model as a Lambda layer; I added it here in case it has any effect:
def rev_entropy(x):
    def row_entropy(row):
        _, _, count = tf.unique_with_counts(row)
        count = tf.cast(count, tf.float32)
        prob = count / tf.reduce_sum(count)
        prob = tf.cast(prob, tf.float32)
        rev = -tf.reduce_sum(prob * tf.log(prob))
        return rev

    nw = tf.reduce_sum(x, axis=1)
    rev = tf.map_fn(row_entropy, x)
    rev = tf.where(tf.is_nan(rev), tf.zeros_like(rev), rev)
    rev = tf.cast(rev, tf.float32)
    max_entropy = tf.log(tf.clip_by_value(nw, 2, LATENT_SIZE))
    concentration = (max_entropy / (1 + rev))
    new_x = x * (tf.reshape(concentration, [BATCH_SIZE, 1]))
    return new_x
Any help is appreciated:)
I tried the following example on Google Colab (TensorFlow version 1.13.1):
from tensorflow.python import keras
import numpy as np

SEQUENCE_LEN = 45
LATENT_SIZE = 20
EMBED_SIZE = 50
VOCAB_SIZE = 100

inputs = keras.layers.Input(shape=(SEQUENCE_LEN,), name="input")
embedding = keras.layers.Embedding(output_dim=EMBED_SIZE, input_dim=VOCAB_SIZE, input_length=SEQUENCE_LEN, trainable=True)(inputs)
encoded = keras.layers.Bidirectional(keras.layers.LSTM(LATENT_SIZE), merge_mode="sum", name="encoder_lstm")(embedding)
decoded = keras.layers.RepeatVector(SEQUENCE_LEN, name="repeater")(encoded)
decoded = keras.layers.Bidirectional(keras.layers.LSTM(EMBED_SIZE, return_sequences=True), merge_mode="sum", name="decoder_lstm")(decoded)
autoencoder = keras.models.Model(inputs, decoded)
autoencoder.compile(optimizer="sgd", loss='mse')
autoencoder.summary()
And then trained the model using some random data,
NUM_EPOCHS = 1  # not defined in the snippet above; any small value works for this test
x = np.random.randint(0, 90, size=(10, 45))
y = np.random.normal(size=(10, 45, 50))
history = autoencoder.fit(x, y, epochs=NUM_EPOCHS)
This solution worked fine. I feel like the issue might be the way you are feeding in labels/outputs for MSE calculation.
Update
Context
In the original problem, you are attempting to reconstruct word embeddings using a seq2seq model, where the embeddings are fixed and pre-trained. If instead you want to use a trainable embedding layer as part of the model, it becomes very difficult to model this problem, because you no longer have fixed targets (i.e. the targets change at every iteration of the optimization, since your embedding layer is changing). Furthermore, this leads to a very unstable optimization problem, because the targets are changing all the time.
Fixing your code
If you do the following you should be able to get the code working. Here embeddings is the pre-trained GloVe vector numpy.ndarray.
def sentence_generator(X, embeddings, batch_size):
    while True:
        # loop once per epoch
        num_recs = X.shape[0]
        embed_size = embeddings.shape[1]
        indices = np.random.permutation(np.arange(num_recs))
        # print(embeddings.shape)
        num_batches = num_recs // batch_size
        for bid in range(num_batches):
            sids = indices[bid * batch_size : (bid + 1) * batch_size]
            # Xbatch is a [batch_size, seq_length] array
            Xbatch = X[sids, :]
            # Creating the Y targets
            Xembed = embeddings[Xbatch.reshape(-1), :]
            # Ybatch will be a [batch_size, seq_length, embed_size] array
            Ybatch = Xembed.reshape(batch_size, -1, embed_size)
            yield Xbatch, Ybatch
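The generator now yields integer word ids as inputs and the corresponding GloVe vectors as targets, which is what the Embedding-input model expects. Wiring it back into the original training call would look roughly like this (a sketch, assuming Xtrain, Xtest, embeddings, BATCH_SIZE and NUM_EPOCHS as defined in the question):
train_gen = sentence_generator(Xtrain, embeddings, BATCH_SIZE)
test_gen = sentence_generator(Xtest, embeddings, BATCH_SIZE)

history = autoencoder.fit_generator(train_gen,
                                    steps_per_epoch=len(Xtrain) // BATCH_SIZE,
                                    epochs=NUM_EPOCHS,
                                    validation_data=test_gen,
                                    validation_steps=len(Xtest) // BATCH_SIZE)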

How to use a pre-trained embedding matrix in tensorflow 2.0 RNN as initial weights in an embedding layer?

I'd like to use a pretrained GloVe embedding as the initial weights for an embedding layer in an RNN encoder/decoder. The code is in TensorFlow 2.0. Simply adding the embedding matrix as a weights=[embedding_matrix] parameter to the tf.keras.layers.Embedding layer won't do it, because the encoder is an object and I'm not sure how to effectively pass the embedding_matrix to this object at training time.
My code closely follows the neural machine translation example in the TensorFlow 2.0 documentation. How would I add a pre-trained embedding matrix to the encoder in this example? The encoder is an object. When I get to training, the GloVe embedding matrix is unavailable to the TensorFlow graph. I get the error message:
RuntimeError: Cannot get value inside Tensorflow graph function.
The code uses the GradientTape method and teacher forcing in the training process.
I've tried modifying the encoder object to include the embedding_matrix at various points, including in the encoder's __init__, call and initialize_hidden_state. All of these fail. The other questions on Stack Overflow and elsewhere are for Keras or older versions of TensorFlow, not TensorFlow 2.0.
class Encoder(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim, weights=[embedding_matrix])
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')

    def call(self, x, hidden):
        x = self.embedding(x)
        output, state = self.gru(x, initial_state=hidden)
        return output, state

    def initialize_hidden_state(self):
        return tf.zeros((self.batch_sz, self.enc_units))

encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)

# sample input
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))
print('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))

# ... Bahdanau Attention, Decoder layers, and train_step defined, see link to full tensorflow code above ...

# Relevant training code
EPOCHS = 10
training_record = pd.DataFrame(columns=['epoch', 'training_loss', 'validation_loss', 'epoch_time'])

for epoch in range(EPOCHS):
    template = 'Epoch {}/{}'
    print(template.format(epoch + 1, EPOCHS))
    start = time.time()
    enc_hidden = encoder.initialize_hidden_state()
    total_loss = 0
    total_val_loss = 0

    for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
        batch_loss = train_step(inp, targ, enc_hidden)
        total_loss += batch_loss
        if batch % 100 == 0:
            template = 'batch {} ============== train_loss: {}'
            print(template.format(batch + 1, round(batch_loss.numpy(), 4)))
I was trying to do the same thing and getting the exact same error. The problem is that the weights argument of the Embedding layer is deprecated. Changing weights= to embeddings_initializer= worked for me.
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                           embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
                                           trainable=False)
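Put together with the Encoder from the question, a minimal sketch of how this could look (assuming embedding_matrix is the GloVe numpy array built outside the class; call() and initialize_hidden_state() stay exactly as in the question):
class Encoder(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz, embedding_matrix):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        # Initialize the layer from the pre-trained matrix instead of weights=[...]
        self.embedding = tf.keras.layers.Embedding(
            vocab_size, embedding_dim,
            embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
            trainable=False)
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
    # call() and initialize_hidden_state() as in the question

encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE, embedding_matrix)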
Firstly, load the pretrained embedding matrix using:
def pretrained_embeddings(file_path, EMBEDDING_DIM, VOCAB_SIZE, word2idx):
    # 1. load in pre-trained word vectors (a feature vector for each word)
    print("graph in function", tf.get_default_graph())
    print('Loading word vectors...')
    word2vec = {}
    with open(os.path.join(file_path + '.%sd.txt' % EMBEDDING_DIM), errors='ignore', encoding='utf8') as f:
        # it is just a space-separated text file in the format:
        # word vec[0] vec[1] vec[2] ...
        for line in f:
            values = line.split()
            word = values[0]
            vec = np.asarray(values[1:], dtype='float32')
            word2vec[word] = vec
    print('Found %s word vectors.' % len(word2vec))

    # 2. prepare embedding matrix
    print('Filling pre-trained embeddings...')
    num_words = VOCAB_SIZE
    # initialization by zeros
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    for word, i in word2idx.items():
        if i < VOCAB_SIZE:
            embedding_vector = word2vec.get(word)
            if embedding_vector is not None:
                # words not found in the embedding index will be all zeros
                embedding_matrix[i] = embedding_vector
    return embedding_matrix
Secondly, update the Encoder class as follows:
class Encoder(tf.keras.Model):
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz, embedding_matrix):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim, weights=[embedding_matrix])
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')

    def call(self, x, hidden):
        x = self.embedding(x)
        output, state = self.gru(x, initial_state=hidden)
        return output, state

    def initialize_hidden_state(self):
        return tf.zeros((self.batch_sz, self.enc_units))
Thirdly, call the function that loads the pre-trained embeddings to get the embedding matrix:
embedding_matrix = pretrained_embeddings(file_path, EMBEDDING_DIM, vocab_size, word2idx)
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE, embedding_matrix)

# sample input
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))
print('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))
Note: this works well on TensorFlow 1.13.1.