I haven't been able to solve this problem for several days.
I'm new to NLP, so the solution is probably very simple.
class QAModel(pl.LightningDataModule):
def __init__(self):
super().__init__()
self.model = MT5ForConditionalGeneration.from_pretrained(MODEL_NAME, return_dict=True)
def forward(self, input_ids, attention_mask, labels=None):
output = model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels
)
return output.loss, output.logits
def training_step(self, batch, batch_idx):
input_ids = batch['input_ids']
attention_mask = batch['attention_mask']
labels = batch['labels']
loss, outputs = self(input_ids, attention_mask, labels)
self.log('train_loss', loss, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
input_ids = batch['input_ids']
attention_mask = batch['attention_mask']
labels = batch['labels']
loss, outputs = self(input_ids, attention_mask, labels)
self.log('val_loss', loss, prog_bar=True, logger=True)
return loss
def test_step(self, batch, batch_idx):
input_ids = batch['input_ids']
attention_mask = batch['attention_mask']
labels = batch['labels']
loss, outputs = self(input_ids, attention_mask, labels)
self.log('test_loss', loss, prog_bar=True, logger=True)
return loss
def configure_optimizers(self):
return AdamW(self.parameters(), lr=0.0001)
model = QAModel()
from pytorch_lightning.callbacks import ModelCheckpoint
checkpoint_callback = ModelCheckpoint(
dirpath='/content/checkpoints',
filename='best-checkpoint',
save_top_k=1,
verbose=True,
monitor='val_loss',
mode='min'
)
trainer = pl.Trainer(
checkpoint_callback=checkpoint_callback,
max_epochs=N_EPOCHS,
gpus=1,
progress_bar_refresh_rate=30
)
trainer.fit(model, data_module)
Running this code gives me
AttributeError: 'QAModel' object has no attribute 'automatic_optimization'
after calling fit().
The problem is probably in MT5ForConditionalGeneration, since passing it to the function gives the same error.
Try inheriting from pl.LightningModule instead of pl.LightningDataModule. It is the right base class for defining a model.
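For reference, a minimal sketch of the corrected class header (assuming the same imports as in the question; the checkpoint name below is only a placeholder for the question's MODEL_NAME) could look like this. Note that forward should probably also call self.model rather than the bare global model:

import pytorch_lightning as pl
from transformers import MT5ForConditionalGeneration

MODEL_NAME = "google/mt5-small"  # placeholder; use the same checkpoint as in the question

class QAModel(pl.LightningModule):  # LightningModule, not LightningDataModule
    def __init__(self):
        super().__init__()
        self.model = MT5ForConditionalGeneration.from_pretrained(MODEL_NAME, return_dict=True)

    def forward(self, input_ids, attention_mask, labels=None):
        # call self.model here; the bare name `model` would refer to a global that
        # does not exist yet when the class is defined
        output = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        return output.loss, output.logits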
Related
I am trying to port the Keras Semantic Similarity example to PyTorch Lightning. I followed all the necessary steps from the Keras example, but somehow the loss is not decreasing at all. I am just curious where I made a mistake.
Here is the colab link
PyTorch Model
MAX_LENGTH = 128
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from transformers import BertModel, AdamW
import pytorch_lightning as pl
class BertWithLSTM(pl.LightningModule):
def __init__(self,train_path,test_path, val_path,num_classes):
super().__init__()
self.bert = BertModel.from_pretrained("bert-base-uncased", output_attentions=True)
self.bert.trainable = False
self.lstm = nn.LSTM(input_size=768, hidden_size=64, batch_first=True, bidirectional=True, dropout=0.3)
self.dropout = nn.Dropout(p=0.3)
self.output = nn.Linear(MAX_LENGTH*2,3)
self.num_classes = num_classes
self.train_path = train_path
self.test_path = test_path
self.val_path = val_path
self.criterion = nn.CrossEntropyLoss()
def train_dataloader(self):
train_dataset = CustomDataset(csv_path=self.train_path)
return DataLoader(dataset=train_dataset, batch_size=32, shuffle=False)
def test_dataloader(self):
test_dataset = CustomDataset(csv_path=self.test_path)
return DataLoader(dataset=test_dataset)
def val_dataloader(self):
val_dataset = CustomDataset(csv_path=self.val_path)
return DataLoader(dataset=val_dataset, batch_size=16, shuffle=True)
def training_step(self,train_batch, batch_idx):
input_ids,attention_mask,token_type_ids,labels = train_batch
output = self(input_ids, attention_mask, token_type_ids, labels)
loss = F.cross_entropy(output, labels)
self.log("training_loss", loss, on_epoch=True, prog_bar=True, logger=True)
return loss
def validation_step(self,val_batch, batch_idx):
input_ids,attention_mask,token_type_ids,labels = val_batch
output = self(input_ids, attention_mask, token_type_ids, labels)
loss = F.cross_entropy(output, labels)
self.log("val_loss", loss, on_epoch=True, prog_bar=True, logger=True)
def forward(self, input_ids,attention_mask,token_type_ids, label ):
bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
sequence_output = bert_output.last_hidden_state
output, (hidden_state, c_n) = self.lstm(sequence_output)
avg_pool = torch.mean(output, dim=1)
max_pool,indices = torch.max(output,dim=1)
output = torch.cat((avg_pool,max_pool),1)
output = self.output(output)
return output
def configure_optimizers(self):
optimizer = torch.optim.AdamW(self.parameters())
return optimizer
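One side note worth checking (an assumption about intent, not a confirmed cause of the flat loss): self.bert.trainable = False is a Keras idiom and has no effect on a PyTorch module. A minimal sketch of actually freezing the BERT weights in PyTorch would be:

import torch.nn as nn
from transformers import BertModel

class FrozenBertExample(nn.Module):  # hypothetical class, mirroring the __init__ above
    def __init__(self):
        super().__init__()
        self.bert = BertModel.from_pretrained("bert-base-uncased", output_attentions=True)
        # Disable gradients on the BERT parameters; assigning `trainable = False`
        # does nothing for a torch.nn.Module.
        for param in self.bert.parameters():
            param.requires_grad = False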
I want to train a custom, stateful RNN, but I am failing to do so over batches. I followed the documentation of tf.keras.layers.RNN, but it still does not work.
Stand-alone code:
import tensorflow as tf
import numpy as np
class RecurrentCell(tf.keras.layers.Layer):
def __init__(self,
units,
**kwargs):
self.units = units
self.state_size = units
self.output_size = 5
super(RecurrentCell, self).__init__(**kwargs)
def build(self, input_shape):
self.training_params = self.add_weight(shape=(1, self.output_size),
initializer='uniform',
name='kernel')
self.built = True
def call(self, inputs, states):
new_states = states
output = inputs + self.training_params
return output, [new_states]
def get_initial_state(self, inputs=None, batch_size=10,dtype=np.float32):
return tf.zeros( tuple([batch_size]) + tuple([self.state_size]), dtype=np.float32)
class RecurrentModel(tf.keras.Model):
def __init__(self, **kwargs):
super(RecurrentModel,self).__init__()
self.recurrent_layer =tf.keras.layers.RNN(RecurrentCell(units=5),
return_sequences=True,
stateful=True,
)
def call(self, inputs):
return self.recurrent_layer(inputs)
class CustomCallback(tf.keras.callbacks.Callback):
def on_train_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
model = RecurrentModel()
model.compile(optimizer="adam", loss="mse")
batch_size = 2
time_index = 10
features = 5
inputs = tf.random.uniform((batch_size, time_index, features))
outputs = tf.random.uniform((batch_size, time_index, features))
model.fit(x=inputs, y=outputs, batch_size=2, shuffle=False, epochs=10, callbacks=[CustomCallback()])
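One documented requirement that may matter here (a side note, not necessarily the full fix): a stateful Keras RNN layer needs a fixed batch size known when the layer is built, which a subclassed model fed plain arrays does not provide. A minimal sketch using the functional API and the RecurrentCell defined above:

import tensorflow as tf

batch_size, time_index, features = 2, 10, 5
# With stateful=True the layer must know the batch dimension up front,
# so the input is declared with a fixed batch size.
inputs = tf.keras.Input(batch_shape=(batch_size, time_index, features))
outputs = tf.keras.layers.RNN(RecurrentCell(units=5),
                              return_sequences=True,
                              stateful=True)(inputs)
stateful_model = tf.keras.Model(inputs, outputs)
stateful_model.compile(optimizer="adam", loss="mse")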
In particular, the documentation also seems to be inconsistent with the Sequential model (I haven't tried the functional API).
Thanks a lot!
matias
def encoder():
input_layer = Input(batch_shape=(None, 13, 128))
h= layer(input_layer)
h= Masking(mask_value=0.0)(h)
h, hidden_layer, cell_layer = LSTM(512, return_state=True)(h)
model = Model(inputs = input_layer, outputs = [hidden_layer, cell_layer])
return model
model=encoder()
model.summary()
class Decoder(Model):
def __init__(self):
super(Decoder, self).__init__()
self.embedding_layer = Embedding(input_dim=max_tokens+1, output_dim=128, mask_zero=True)
self.lstm_layer = LSTM(512,
return_state=True, return_sequences=True)
self.dense_layer = Dense(units=max_tokens+1)
def call(self,inputer,hidden_layer=None,cell_layer=None):
x=self.embedding_layer(inputer)
if hidden_layer!=None and cell_layer!=None:
x, h, c = self.lstm_layer(x, initial_state=[hidden_layer, cell_layer])
else:
x, h, c = self.lstm_layer(x)
x=self.dense_layer(x)
return x,h,c
decoder=Decoder()
for eng,germ in train.take(1):
y,hidden,cell = decoder(germ)
@tf.function
def loss_fn(en_input, germ_input, germ_output, loss):
with tf.GradientTape() as tape:
enc_hidden_s, enc_cell_s = model(en_input)
dec_output, dec_hidden_s, dec_cell_s = decoder(germ_input, enc_hidden_s,enc_cell_s)
loss_value = loss(germ_output, dec_output)
return loss_value, tape.gradient(loss_value, variables)
def fit_german_shape(german):
input_data = german[:,:-1]
output_data = german[:,1:]
return input_data,output_data
def training(train_data, test_data,optimizer, loss,epochs=5):
batch_num=0
batch_num2=0
epoch_loss=0
epoch_loss2=0
for english,germany in train:
germany_in,germany_out=fit_german_shape(germany)
loss2, grad= loss_fn(english, germany_in, germany_out, loss)
optimizer.apply_gradients(zip(grad,model.trainable_variables + decoder.trainable_variables))
epoch_loss=epoch_loss+loss2
batch_num=batch_num+1
avg_loss=epoch_loss/batch_num
avg_loss3=str(avg_loss)
print("In this train epoch, the loss is "+avg_loss3)
for english2,germany2 in test:
germany_in2,germany_out2=fit_german_shape(germany2)
hidden_state,cell_state=model(english2)
pred,temp1,temp2=decoder(germany_in2,hidden_state,cell_state)
loss2, temp3 = loss_fn(english2, germany_in2, germany_out2, loss)
epoch_loss2=loss2+epoch_loss2
batch_num2=batch_num2+1
avg_loss2=epoch_loss2/batch_num2
avg_loss4=str(avg_loss2)
print("In this test epoch, the loss is "+avg_loss4)
return avg_loss,avg_loss2
When I use this model to translate German to English, it reports the error "Tried to convert 'y' to a tensor and failed. Error: None values not supported." The error may occur in the decoder when assigning values to x, h, c, but I don't know why y cannot be converted to a tensor.
I am using TensorFlow 2.1 to create custom models and custom training loops. My aim is to compare the accuracy of different configurations of my neural network. Specifically, in this case, I am comparing the reconstruction error of an autoencoder with varying latent dimension: I train the network for one latent dimension, compute the test error, and then repeat the process for another latent dimension, and so on. With this process I want to create plots like this:
Plot example: (image omitted; it showed test reconstruction error plotted against latent dimension.)
To speed up the training I want to use the @tf.function decorator for the backpropagation part of my training loop. However, when I try to train several different networks, looping over the latent dimension, I get an error. See below:
ValueError: in converted code:
<ipython-input-19-78bafad21717>:41 grad *
loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:778 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
<ipython-input-19-78bafad21717>:33 call *
x_enc = self.encoder(inp)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:778 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
<ipython-input-19-78bafad21717>:9 call *
x = self.dense1(inp)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:748 __call__
self._maybe_build(inputs)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:2116 _maybe_build
self.build(input_shapes)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/layers/core.py:1113 build
trainable=True)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:446 add_weight
caching_device=caching_device)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/training/tracking/base.py:744 _add_variable_with_custom_getter
**kwargs_for_getter)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer_utils.py:142 make_variable
shape=variable_shape if variable_shape else None)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:258 __call__
return cls._variable_v1_call(*args, **kwargs)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:219 _variable_v1_call
shape=shape)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:65 getter
return captured_getter(captured_previous, **kwargs)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/def_function.py:502 invalid_creator_scope
"tf.function-decorated function tried to create "
ValueError: tf.function-decorated function tried to create variables on non-first call.
I do not get this error when I remove the @tf.function decorator. I believe it has something to do with TensorFlow creating a computational graph when I use the decorator, and this graph remaining when I create another instance of my network, thus sparking an error since the old graph does not match the new instance of the network. But I am not sure about this at all, since I believe I am missing something fundamental about TensorFlow here!
Below is a very simple version of my code recreating the error. I have tried to remove all the unnecessary parts of the code to make it easier to read and debug. Furthermore, I am generating a very simple training and test set just for the sake of this question.
I have already tried the tf.keras.backend.clear_session() function without any luck.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Encoder
class build_encoder(tf.keras.Model):
def __init__(self,latent_dim):
super(build_encoder, self).__init__()
self.dense1 = tf.keras.layers.Dense(32, activation='relu',use_bias=True)
self.dense2 = tf.keras.layers.Dense(latent_dim, activation='relu',use_bias=True)
def call(self, inp):
x = self.dense1(inp)
x = self.dense2(x)
return x
# Decoder
class build_decoder(tf.keras.Model):
def __init__(self,):
super(build_decoder, self).__init__()
self.dense1 = tf.keras.layers.Dense(32, activation='relu',use_bias=True)
self.dense2 = tf.keras.layers.Dense(10, activation='relu',use_bias=True)
def call(self, inp):
x = self.dense1(inp)
x = self.dense2(x)
return x
# Full Autoencoder
class Autoencoder(tf.keras.Model):
def __init__(self,latent_dim=5):
super(Autoencoder, self).__init__()
self.encoder = build_encoder(latent_dim)
self.decoder = build_decoder()
def call(self, inp):
x_enc = self.encoder(inp)
x_dec = self.decoder(x_enc)
return x_dec
#### Here is the backpropagation with the @tf.function decorator ####
@tf.function
def grad(model, inputs):
with tf.GradientTape() as tape:
loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
return loss_value, tape.gradient(loss_value, model.trainable_variables)
# Training loop function
def train(x_train, model, num_epochs, batch_size,optimizer):
train_loss = []
for epoch in range(num_epochs):
tf.random.shuffle(x_train)
for i in range(0, len(x_train), batch_size):
x_inp = x_train[i: i + batch_size]
loss_value, grads = grad(model, x_inp)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_train, model(x_train))).numpy())
if epoch % 100 == 0:
print("Epoch: {}, Train loss: {:.9f}".format(epoch, train_loss[epoch]))
return train_loss
#### Generating simple training and test data
num_train = 10000
num_test = 1000
x_train = s = np.random.uniform(0,1,(num_train,10)).astype(np.float32)
x_train[:,6:10] = 0
x_test = s = np.random.uniform(0,1,(num_test,10)).astype(np.float32)
x_test[:,6:10] = 0
###
batch_size = 8
num_epochs = 10000
test_loss = []
# Looping over the latent dimensions
for latent_dim in range(1,10):
model = Autoencoder(latent_dim=3) # Creating an instance of my Autoencoder
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005) # Defining an optimizer
train_loss = train(x_train, model=model, num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer) # Training the network
test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model(x_test))).numpy())
plt.figure()
plt.plot(test_loss,linewidth=1.5)
plt.grid(True)
plt.show()
There's an error in the code snippet you provided: I changed the last Dense layer's units from 6 to 10.
# Decoder
class build_decoder(tf.keras.Model):
def __init__(self,):
super(build_decoder, self).__init__()
self.dense1 = tf.keras.layers.Dense(32, activation='relu',use_bias=True)
self.dense2 = tf.keras.layers.Dense(10, activation='relu',use_bias=True)
def call(self, inp):
x = self.dense1(inp)
x = self.dense2(x)
return x
As for your question on training multiple models:
The error message "ValueError: tf.function-decorated function tried to create variables on non-first call" means that the function decorated with @tf.function is creating new variables on a later call; this is not allowed because the function has been turned into a graph.
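In other words, here is a minimal, hypothetical sketch (not taken from your code) that triggers the same error: the decorated function is traced for the first model, and the second, brand-new model causes a retrace in which new variables are created inside the graph, which tf.function rejects.

import tensorflow as tf

@tf.function
def compute_loss(model, x):
    return tf.reduce_mean(tf.losses.mean_squared_error(x, model(x)))

x = tf.zeros([4, 10])
for latent_dim in (1, 2):
    # A new model per iteration means new variables get created inside the traced
    # function on the second pass, raising:
    # ValueError: tf.function-decorated function tried to create variables on non-first call.
    model = tf.keras.Sequential([tf.keras.layers.Dense(latent_dim, activation='relu'),
                                 tf.keras.layers.Dense(10)])
    print(compute_loss(model, x))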
I have modified your backpropagation method; I commented out your original code so you can see the difference.
#### Here is the backpropagation with the @tf.function decorator ####
# @tf.function
# def grad(model, inputs):
# with tf.GradientTape() as tape:
# loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
# return loss_value, tape.gradient(loss_value, model.trainable_variables)
@tf.function
def MSE(y_true, y_pred):
return tf.keras.losses.MSE(y_true, y_pred)
def backprop(inputs, model):
with tf.GradientTape() as tape:
loss_value = MSE(inputs, model(inputs))
return loss_value, tape.gradient(loss_value, model.trainable_variables)
def gradient_func(model, inputs):
return backprop(inputs, model)
The main culprit in your original code was calling model(inputs) as an input to the loss function: when you decorate a function with @tf.function, the decoration is inherited by all the functions called inside it, which means the loss function is the part that gets optimized as a graph.
Also, a way to train multiple models without overwriting a single variable is to put them into an array.
model_array = [0]
# Looping over the latent dimensions
for latent_dim in range(1,10):
model_array.append(Autoencoder(latent_dim))
# Creating an instance of my Autoencoder
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005) # Defining an optimizer
train_loss = train(x_train, model=model_array[latent_dim], num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer) # Training the network
test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model_array[latent_dim](x_test))).numpy())
This arranges the models into an array, which makes them easier to access and debug.
Here is the complete modified code.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Encoder
class build_encoder(tf.keras.Model):
def __init__(self,latent_dim):
super(build_encoder, self).__init__()
self.dense1 = tf.keras.layers.Dense(32, activation='relu',use_bias=True)
self.dense2 = tf.keras.layers.Dense(latent_dim, activation='relu',use_bias=True)
def call(self, inp):
x = self.dense1(inp)
x = self.dense2(x)
return x
# Decoder
class build_decoder(tf.keras.Model):
def __init__(self,):
super(build_decoder, self).__init__()
self.dense1 = tf.keras.layers.Dense(32, activation='relu',use_bias=True)
self.dense2 = tf.keras.layers.Dense(10, activation='relu',use_bias=True)
def call(self, inp):
x = self.dense1(inp)
x = self.dense2(x)
return x
# Full Autoencoder
class Autoencoder(tf.keras.Model):
def __init__(self,latent_dim=5):
super(Autoencoder, self).__init__()
self.encoder = build_encoder(latent_dim)
self.decoder = build_decoder()
def call(self, inp):
x_enc = self.encoder(inp)
x_dec = self.decoder(x_enc)
return x_dec
#### Here is the backpropagation with the @tf.function decorator ####
# @tf.function
# def grad(model, inputs):
# with tf.GradientTape() as tape:
# loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
# return loss_value, tape.gradient(loss_value, model.trainable_variables)
@tf.function
def MSE(y_true, y_pred):
return tf.keras.losses.MSE(y_true, y_pred)
def backprop(inputs, model):
with tf.GradientTape() as tape:
loss_value = MSE(inputs, model(inputs))
return loss_value, tape.gradient(loss_value, model.trainable_variables)
def gradient_func(model, inputs):
return backprop(inputs, model)
# Training loop function
def train(x_train, model, num_epochs, batch_size,optimizer):
train_loss = []
for epoch in range(num_epochs):
tf.random.shuffle(x_train)
for i in range(0, len(x_train), batch_size):
x_inp = x_train[i: i + batch_size]
loss_value, grads = gradient_func(model, x_inp)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_train, model(x_train))).numpy())
if epoch % 100 == 0:
print("Epoch: {}, Train loss: {:.9f}".format(epoch, train_loss[epoch]))
return train_loss
#### Generating simple training and test data
num_train = 10000
num_test = 1000
x_train = s = np.random.uniform(0,1,(num_train,10)).astype(np.float32)
x_train[:,6:10] = 0
x_test = s = np.random.uniform(0,1,(num_test,10)).astype(np.float32)
x_test[:,6:10] = 0
###
batch_size = 8
num_epochs = 10000
test_loss = []
model_array = [0]
# Looping over the latent dimensions
for latent_dim in range(1,10):
model_array.append(Autoencoder(latent_dim))
# Creating an instance of my Autoencoder
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005) # Defining an optimizer
train_loss = train(x_train, model=model_array[latent_dim], num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer) # Training the network
test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model_array[latent_dim](x_test))).numpy())
plt.figure()
plt.plot(range(1,10),test_loss,linewidth=1.5)
plt.grid(True)
plt.show()
There is also a brief discussion about @tf.function and AutoGraph in the TF documentation at this link.
Feel free to ask questions, and I hope this helps.
I am using the code below that implements an autoencoder. How can I feed the autoencoder with data for training and testing?
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
class Autoencoder(object):
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer = tf.train.AdamOptimizer()):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
# cost
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
initializer=tf.contrib.layers.xavier_initializer())
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict={self.x: X})
def generate(self, hidden = None):
if hidden is None:
hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
# I instantiate the Autoencoder class; 5 is the dimension of a raw input,
# 2 is the dimension of the hidden layer
autoencoder = Autoencoder(5, 2, transfer_function=tf.nn.softplus,
                          optimizer=tf.train.AdamOptimizer())
# I prepare my data
IRIS_TRAINING = "C:\\Users\\Desktop\\iris_training.csv"
# Feeding data to the Autoencoder ???
# Train and test ???
How can I train this model with CSV file data? I think I need to run the instruction _, c = sess.run([optimizer, cost], feed_dict={self.x: batch_of_data}) inside a loop over epochs, but I am struggling with it.
Check out Stanford CS20SI's tutorial.
https://github.com/chiphuyen/tf-stanford-tutorials/blob/master/examples/05_csv_reader.py
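Beyond the linked tutorial, here is a minimal sketch of one way to feed the class above. It assumes the CSV has one header row and that its first 5 numeric columns are the raw input features (matching n_input = 5); adjust the genfromtxt arguments to your actual file.

import numpy as np

data = np.genfromtxt(IRIS_TRAINING, delimiter=",", skip_header=1, dtype=np.float32)
X_train = data[:, :5]               # first 5 columns as the raw input

n_epochs, batch_size = 20, 16
for epoch in range(n_epochs):
    np.random.shuffle(X_train)
    epoch_cost = 0.0
    for i in range(0, len(X_train), batch_size):
        batch = X_train[i:i + batch_size]
        # partial_fit() runs one optimizer step and returns the batch cost
        epoch_cost += autoencoder.partial_fit(batch)
    print("epoch {}: total cost {:.4f}".format(epoch, epoch_cost))

# Evaluation on a held-out array would use autoencoder.calc_total_cost(X_test)
# or autoencoder.reconstruct(X_test) from the class above.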