How do I load a checkpoint using tensorflow in eager execution mode?

I am using tensorflow 1.7.0 in eager execution mode. I have the model working, but none of the examples that I have found for saving the model work.
This is the code that I am using:
checkpoint_directory ='./JokeWords/'
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tfe.Checkpoint(model=model,optimizer=optimizer) # save as "x"
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
evaluate(model,jokes,2,32)
....
checkpoint.save(file_prefix=checkpoint_prefix)
I have trained the model and use evaluate to check the results after restoring from a restart. Each time I get a random result from evaluate, which means the model is not being loaded from the checkpoint but is instead using random weights.
How do I save the model? It can take days to train one of these.
Edit: here is the model:
class EagerRNN(tfe.Network):
def __init__(self,embedding, hidden_dim, num_layers, keep_ratio):
super(EagerRNN, self).__init__()
self.keep_ratio = keep_ratio
self.cells = self._add_cells([
tf.nn.rnn_cell.BasicLSTMCell(num_units=hidden_dim)
for _ in range(num_layers)
])
self.backcells = self._add_cells([
tf.nn.rnn_cell.BasicLSTMCell(num_units=hidden_dim)
for _ in range(num_layers)
])
self.linear = layers.Dense(embedding.vocab_size, kernel_initializer=tf.random_uniform_initializer(-0.1, 0.1))
self.backlinear = layers.Dense(embedding.vocab_size, kernel_initializer=tf.random_uniform_initializer(-0.1, 0.1))
self.attension = layers.Dense(hidden_dim, kernel_initializer=tf.random_uniform_initializer(-0.1, 0.1))
def call(self, input_seq,seq_lengths, training):
lengths=[i[0] for i in seq_lengths]
nRotations=max(lengths)
batchSize=input_seq.shape[0]
input_seq2 = tf.unstack(input_seq, num=int(input_seq.shape[1]), axis=1)
atten = None
state = self.cells[0].zero_state(batchSize, tf.float32)
for i in range(0,nRotations):
for j in range(0,len(self.cells)):
c=self.cells[j]
inp=input_seq2[i]
output, state = c(inp, state)
#input_seq2[i]=(output)
if atten is None:
atten =self.linear(output)
else:
atten=atten+self.linear(output)
for i in range(nRotations-1,-1,-1):
for j in range(0,len(self.backcells)):
c=self.backcells[j]
inp=input_seq2[i]
output, state = c(inp, state)
#input_seq2[i]=(output)
atten=atten+self.backlinear(output)
#input_seq = tf.stack(input_seq2[0:nRotations], axis=1)
atten=self.attension(atten)
if training:
input_seq = tf.nn.dropout(input_seq, self.keep_ratio)
# Returning a list instead of a single tensor so that the line:
# y = self.rnn(y, ...)[0]
# in PTBModel.call works for both this RNN and CudnnLSTM (which returns a
# tuple (output, output_states).
return input_seq,state,atten
def _add_cells(self, cells):
# "Magic" required for keras.Model classes to track all the variables in
# a list of Layer objects.
# TODO(ashankar): Figure out API so user code doesn't have to do this.
for i, c in enumerate(cells):
setattr(self, "cell-%d" % i, c)
return cells
class EagerLSTM_Model(tfe.Network):
"""LSTM for word language modeling.
Model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
See also:
https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb
"""
def __init__(self,
embedding,
hidden_dim,
num_layers,
dropout_ratio,
use_cudnn_rnn=True):
super(EagerLSTM_Model, self).__init__()
self.keep_ratio = 1 - dropout_ratio
self.use_cudnn_rnn = use_cudnn_rnn
self.embedding = embedding
if self.use_cudnn_rnn:
self.rnn = cudnn_rnn.CudnnLSTM(
num_layers, hidden_dim, dropout=dropout_ratio)
else:
self.rnn = EagerRNN(embedding,hidden_dim, num_layers, self.keep_ratio)
self.unrnn = EagerUnRNN(embedding,hidden_dim, num_layers, self.keep_ratio)
def callRNN(self, input_seq,seq_lengths, training):
y = self.embedding.callbatchword(input_seq)
if training:
y = tf.nn.dropout(y, self.keep_ratio)
y,state,atten = self.rnn.call(y,seq_lengths, training=training)
return state,atten
def callUnRNN (self,state,atten,seq_lengths, training ):
x,state = self.unrnn(state,atten,seq_lengths,training=training)
#b=tf.reshape(y, self._output_shape)
#c=self.linear(b)
return x

tfe.Network is not (easily) checkpointable, and it will soon be deprecated; prefer to subclass tf.keras.Model instead. So if you change class EagerRNN(tfe.Network) to class EagerRNN(tf.keras.Model) and class EagerLSTM_Model(tfe.Network) to class EagerLSTM_Model(tf.keras.Model), checkpoint.save(file_prefix=checkpoint_prefix) should actually save all your variables, and checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory)) should restore them.
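A minimal, self-contained sketch of the save/restore round trip with a tf.keras.Model subclass (TF 1.x eager API, as in the question; the tiny model, optimizer, and directory names here are illustrative placeholders, not taken from the original code):
import os
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

class TinyModel(tf.keras.Model):  # subclass tf.keras.Model, not tfe.Network
    def __init__(self):
        super(TinyModel, self).__init__()
        self.dense = tf.keras.layers.Dense(4)
    def call(self, x):
        return self.dense(x)

model = TinyModel()
optimizer = tf.train.AdamOptimizer()
model(tf.zeros([1, 8]))  # run the model once so its variables exist before saving

checkpoint_directory = './ckpt_test/'
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tfe.Checkpoint(model=model, optimizer=optimizer)

checkpoint.save(file_prefix=checkpoint_prefix)  # writes the variables to disk
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))  # reads them back
If restore is called before the variables have been created, the values are loaded lazily as each variable is built, which is why restoring right after constructing the model and optimizer works.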

Related

How to create a keras layer with a custom gradient *and learnable parameters* in TF2.0?

This is a similar question to: How to create a keras layer with a custom gradient in TF2.0?
Only, I would like to introduce a learnable parameter into the custom layer that I am training.
Here's a toy example of my current approach:
# Method for calculating a custom gradient
@tf.custom_gradient
def scaler(x, s):
def grad(upstream):
dy_dx = s
dy_ds = x
return dy_dx, dy_ds
return x * s, grad
# Keras Layer with trainable parameter
class TestLayer(tf.keras.layers.Layer):
def build(self, input_shape):
self.scale = self.add_weight("scale",
shape=[1,],
initializer=tf.keras.initializers.Constant(value=2.0),
trainable=True)
def call(self, inputs):
return scaler(inputs, self.scale)
# Creates Keras Model that uses the layer
def Model():
x_in = tf.keras.layers.Input(shape=(1,))
x_out = TestLayer()(x_in)
return tf.keras.Model(inputs=x_in, outputs=x_out, name="fp8_test")
# Create toy dataset, want to learn `scale` such to satisfy 5 = 2 * scale (i.e, `scale` should learn ~2.5)
def Dataset():
inps = tf.ones(shape=(10**5,)) * 2 # inputs
expected = tf.ones(shape=(10**5,)) * 5 # targets
data_in = tf.data.Dataset.from_tensors(inps)
data_exp = tf.data.Dataset.from_tensors(expected)
dataset = tf.data.Dataset.zip((data_in, data_exp))
return dataset
model = Model()
model.summary()
dataset = Dataset()
# Use `MSE` loss and `SGD` optimizer
model.compile(
loss=tf.keras.losses.MSE,
optimizer=tf.keras.optimizers.SGD(),
)
model.fit(dataset, epochs=100)
This is failing with the following shape-related error in the optimizer:
ValueError: Shapes must be equal rank, but are 1 and 2 for '{{node SGD/SGD/update/ResourceApplyGradientDescent}} = ResourceApplyGradientDescent[T=DT_FLOAT, use_locking=true](fp8_test/test_layer_1/ReadVariableOp/resource, SGD/Identity, SGD/IdentityN)' with input shapes: [], [], [100000,1].
I've been staring at the docs for a while and I'm a bit stumped as to why this isn't working; I would really appreciate any input on how to fix this toy example.
Thanks in advance.
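A hedged guess at the cause, offered only as a sketch and based on the shapes in the error (a [1]-shaped weight receiving a [100000, 1] gradient): the gradient returned for s must have the same shape as s, so the per-example contributions need to be reduced over the batch, and the upstream gradient should be folded in rather than ignored, e.g.:
@tf.custom_gradient
def scaler(x, s):
    def grad(upstream):
        dy_dx = upstream * s  # same shape as x
        dy_ds = tf.reduce_sum(upstream * x, axis=0)  # reduce over the batch -> shape [1], matching s
        return dy_dx, dy_ds
    return x * s, grad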

" ValueError: Expecting KerasTensor which is from tf.keras.Input()". Error in prediction with dropout function

I am trying to predict uncertainty in a regression problem using dropout during testing, as per Yarin Gal's article. I created a class using Keras's backend function as provided by this Stack Overflow question's answer. The class takes an NN model as input and randomly drops neurons during testing to give a stochastic estimate rather than a deterministic output for time-series forecasting.
I create a simple encoder-decoder model as shown below for the forecasting with 0.1 dropout during training:
input_sequence = Input(shape=(lookback, train_x.shape[2]))
encoder = LSTM(128, return_sequences=False)(input_sequence)
r_vec = RepeatVector(forward_pred)(encoder)
decoder = LSTM(128, return_sequences=True, dropout=0.1)(r_vec) #maybe use dropout=0.1
output = TimeDistributed(Dense(train_y.shape[2], activation='linear'))(decoder)
# optimiser = optimizers.Adam(clipnorm=1)
enc_dec_model = Model(input_sequence, output)
enc_dec_model.compile(loss="mean_squared_error",
optimizer="adam",
metrics=['mean_squared_error'])
enc_dec_model.summary()
After that, I define and call the DropoutPrediction class.
# Define the class:
class KerasDropoutPrediction(object):
def __init__(self ,model):
self.f = K.function(
[model.layers[0].input,
K.learning_phase()],
[model.layers[-1].output])
def predict(self ,x, n_iter=10):
result = []
for _ in range(n_iter):
result.append(self.f([x , 1]))
result = np.array(result).reshape(n_iter ,x.shape[0] ,x.shape[1]).T
return result
# Call the object:
kdp = KerasDropoutPrediction(enc_dec_model)
y_pred_do = kdp.predict(x_test,n_iter=100)
y_pred_do_mean = y_pred_do.mean(axis=1)
However, at the line
kdp = KerasDropoutPrediction(enc_dec_model), when I call the LSTM model,
I get the following error message, which says the input has to be a Keras tensor. Can anyone help me with this error?
Error Message:
ValueError: Found unexpected instance while processing input tensors for keras functional model. Expecting KerasTensor which is from tf.keras.Input() or output from keras layer call(). Got: 0
To activate dropout at inference time, you simply have to specify training=True (TF > 2.0) in the layer of interest (the last LSTM layer in your case).
With training=False:
inp = Input(shape=(10, 1))
x = LSTM(1, dropout=0.3)(inp, training=False)
m = Model(inp,x)
# m.compile(...)
# m.fit(...)
X = np.random.uniform(0,1, (1,10,1))
output = []
for i in range(0,100):
output.append(m.predict(X)) # always the same
With training=True:
inp = Input(shape=(10, 1))
x = LSTM(1, dropout=0.3)(inp, training=True)
m = Model(inp,x)
# m.compile(...)
# m.fit(...)
X = np.random.uniform(0,1, (1,10,1))
output = []
for i in range(0,100):
output.append(m.predict(X)) # always different
In your example, this becomes:
input_sequence = Input(shape=(lookback, train_x.shape[2]))
encoder = LSTM(128, return_sequences=False)(input_sequence)
r_vec = RepeatVector(forward_pred)(encoder)
decoder = LSTM(128, return_sequences=True, dropout=0.1)(r_vec, training=True)
output = TimeDistributed(Dense(train_y.shape[2], activation='linear'))(decoder)
enc_dec_model = Model(input_sequence, output)
enc_dec_model.compile(
loss="mean_squared_error",
optimizer="adam",
metrics=['mean_squared_error']
)
enc_dec_model.fit(train_x, train_y, epochs=10, batch_size=32)
and the KerasDropoutPrediction:
class KerasDropoutPrediction(object):
def __init__(self, model):
self.model = model
def predict(self, X, n_iter=10):
result = []
for _ in range(n_iter):
result.append(self.model.predict(X))
result = np.array(result)
return result
kdp = KerasDropoutPrediction(enc_dec_model)
y_pred_do = kdp.predict(test_x, n_iter=100)
y_pred_do_mean = y_pred_do.mean(axis=0)
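As a small usage note (a sketch assuming the shapes produced above): the uncertainty estimate from Gal's MC-dropout procedure is just the spread across the stochastic passes, taken over the same axis as the mean:
y_pred_do_std = y_pred_do.std(axis=0)  # per-step uncertainty alongside y_pred_do_mean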

GCN model is not learning

I am trying to implement a GCN layer using tensorflow, but it is not learning. Can someone check what the potential issue could be?
I have tried normalizing the adjacency matrix and even replaced it with the identity so that the GCN layer becomes a simple MLP, but there is no change. I think I have made some fundamental/silly mistake in my implementation which I am not able to find. Can someone let me know what the issue could be?
!pip install numpy
!pip install tensorflow
!pip install spektral
#!pip install tqdm
import numpy as np
import tensorflow as tf
import spektral
def masked_cross_entropy_loss( labels,logits, mask ):
loss = tf.nn.softmax_cross_entropy_with_logits(labels,logits )
mask = tf.cast(mask, dtype=tf.float32)
# The step below is important because we need the mean over only the masked nodes.
# Dividing the mask by its mean gives mask * total_nodes / total_masked_nodes, which comes in handy when we take the mean of the loss in the final step:
# the total number of nodes cancels between the two means (of the mask and of the loss),
# leaving only the number of masked nodes in the denominator.
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_accuracy( labels, logits, mask ):
accuracy_array = tf.equal(tf.argmax(logits, axis=1), tf.argmax(labels, axis=1))
accuracy_array = tf.cast(accuracy_array, dtype =tf.float32)
mask = tf.cast(mask, dtype = tf.float32)
mask/= tf.reduce_mean(mask)
accuracy_array *= mask
return tf.reduce_mean(accuracy_array)
class GCNLayer:
def __init__( self, A ):
self.A = A
def _transform( self, units, features, trans_func ):
if trans_func == 'dense':
features = tf.keras.layers.Dense(units)( features )
features = tf.cast(features, dtype=tf.float32)
return features
else:
raise Exception('Transformation function not implemented')
def _aggregate( self, features, agg_func ):
if agg_func == 'adj_matmul':
return self.A @ features
else:
raise Exception('Aggregation function not implemented')
def _activate( self, features, activation ):
features = tf.keras.layers.Activation(activation)( features)
return features
def __call__( self, units, features, trans_func='dense', agg_func = 'adj_matmul', activation='relu' ):
features = self._transform(units, features, trans_func )
features = self._aggregate(features, agg_func)
if activation is not None:
features = self._activate(features, activation)
return features
class MyModel:
def __init__( self, A, node_features, node_labels, train_mask, val_mask, test_mask ):
self.A = A
self.node_features = node_features
self.node_labels = node_labels
self.train_mask = train_mask
self.val_mask = val_mask
self.test_mask = test_mask
self.gcn_layer1 = GCNLayer(self.A)
self.gcn_layer2 = GCNLayer(self.A)
def __call__( self ):
hidden_out = self.gcn_layer1(32, self.node_features, activation='relu' )
output = self.gcn_layer2(7, hidden_out, activation=None)
return output
def train( self, num_epochs=1, lr =0.01 ):
optimizer = tf.keras.optimizers.Adam(lr)
best_val_acc = 0.0
for e in range(num_epochs):
with tf.GradientTape() as t:
logits = self()
train_loss = masked_cross_entropy_loss( self.node_labels, logits, self.train_mask )
variables = t.watched_variables()
grads = t.gradient(train_loss, variables)
optimizer.apply_gradients(zip(grads, variables))
logits = self()
train_acc = masked_accuracy( self.node_labels, logits, self.train_mask )
val_acc = masked_accuracy( self.node_labels, logits, self.val_mask )
if val_acc > best_val_acc:
best_val_acc = val_acc
print(f'epoch={e},Training Loss:{train_loss.numpy()},Training Accuracy:{train_acc.numpy()}, Validation Accuracy:{val_acc.numpy()}')
model = MyModel(A, node_features, node_labels, train_mask, val_mask, test_mask)
model.train(num_epochs=200, lr=0.01)
Output
epoch=0,Training Loss:4.099794864654541,Training Accuracy:0.1428571492433548, Validation Accuracy:0.09000000357627869
epoch=1,Training Loss:6.438627243041992,Training Accuracy:0.20714285969734192, Validation Accuracy:0.16599997878074646
epoch=5,Training Loss:5.980966091156006,Training Accuracy:0.17142857611179352, Validation Accuracy:0.17399999499320984
epoch=13,Training Loss:3.9486303329467773,Training Accuracy:0.15000000596046448, Validation Accuracy:0.2800000011920929
epoch=40,Training Loss:5.182331562042236,Training Accuracy:0.23571430146694183, Validation Accuracy:0.29600000381469727
epoch=158,Training Loss:6.245728969573975,Training Accuracy:0.2142857164144516, Validation Accuracy:0.3160000145435333
Your model is learning but doesn't converge. Consider checking/adding data, using a simpler model, or tuning hyperparameters during training (e.g. the learning rate or batch size).
I found the problem in my code. I had instantiated the tf.keras.layers.Dense layer inside the __call__ function, which caused the weights to be reinitialized on every epoch and confused the GradientTape.
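A minimal sketch of that fix (only the layer and its call sites, not a full rewrite; the widths 32 and 7 follow the original model): create the Dense sub-layer once in __init__ so its weights persist across epochs and are picked up by the GradientTape, instead of building a fresh Dense(units) on every call.
import tensorflow as tf

class GCNLayer:
    def __init__(self, A, units, activation='relu'):
        self.A = A  # adjacency matrix, assumed dense and float32-compatible
        self.dense = tf.keras.layers.Dense(units)  # created once, reused on every call
        self.activation = tf.keras.layers.Activation(activation) if activation else None
    def __call__(self, features):
        features = tf.cast(self.dense(features), dtype=tf.float32)  # transform
        features = self.A @ features  # aggregate over neighbours
        return self.activation(features) if self.activation is not None else features

# In MyModel this would be wired up as, e.g.:
#   self.gcn_layer1 = GCNLayer(self.A, 32)
#   self.gcn_layer2 = GCNLayer(self.A, 7, activation=None)
# and called as:
#   hidden_out = self.gcn_layer1(self.node_features)
#   output = self.gcn_layer2(hidden_out)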

Pytorch training loop for pix2code autoencoder model

I am trying to implement the pix2code paper according to this blog post, using PyTorch instead of TensorFlow. I want to focus only on the HTML version from the post.
In short: a pretrained CNN is used to extract a feature map from the screenshot. The feature map is then given to an encoder. Another encoder is used to extract features from a given sequence that has already been preprocessed (tokenized and numericalized text). The features extracted by the CNN and the RNN are concatenated and given to an LSTM decoder, which then generates the HTML token by token.
I have already built the model and everything else, but now I am stuck implementing the training loop, since I have always relied on TensorFlow's fit function, which is mysterious to me.
I would really appreciate it if somebody out there could give me some hints! In particular, I am wondering how to initialize and use the hidden states. If I am missing some important information, just let me know and I will provide it.
class VM(nn.Module):
def __init__(self, nfeat = 128):
super(VM, self).__init__()
# Linear input size of feature map extracted by resnet
self.fc = nn.Linear(256*14*14, nfeat)
def forward(self, images):
out = images.reshape(images.size(0), -1)
out = self.fc(out)
out = F.relu(out)
return out
class LM(nn.Module):
def __init__(self, ntoken, ninp=200, nhidden = 256, nfeat = 128, nlayers=2):
super(LM, self).__init__()
self.nlayers = nlayers
self.nhid = nhidden
self.encoder = nn.Embedding(ntoken, ninp)
self.lstm = nn.LSTM(ninp, nhidden, nlayers)
self.decoder = nn.Linear(nhidden, nfeat)
def forward(self, seq, hidden):
embed = self.encoder(seq)
out, hidden = self.lstm(embed, hidden)
out = self.decoder(out)
out = F.relu(out)
return out, hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, bsz, self.nhid), weight.new_zeros(self.nlayers, bsz, self.nhid))
class Decoder(nn.Module):
def __init__(self, ntoken, ninp, nhidden=512, nlayers=1):
super(Decoder, self).__init__()
self.nlayers = nlayers
self.nhid = nhidden
self.lstm = nn.LSTM(ninp, nhidden, nlayers)
self.fc = nn.Linear(nhidden, ntoken)
def forward(self, x, hidden):
out, hidden = self.lstm(x, hidden)
out = self.fc(out[:, -1, :])
return F.log_softmax(out, dim=1), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, bsz, self.nhid), weight.new_zeros(self.nlayers, bsz, self.nhid))
class Pix2Code(nn.Module):
def __init__(self, ntoken): # , ninp, nhidden=512, nlayers=1
super(Pix2Code, self).__init__()
self.vm = VM(nfeat=128)
self.lm = LM(ntoken=ntoken, ninp=200, nhidden = 256, nfeat = 128, nlayers=2)
self.decoder = Decoder(ntoken=ntoken, ninp=256)
def forward(self, imgs, seqs, hidden):
img_feats = self.vm(imgs)
seq_feats, hidden = self.lm(seqs, hidden)
# Note: the LM and the Decoder use different hidden sizes, so this hidden state cannot simply be reused by the Decoder.
features = torch.cat((img_feats, seq_feats), 2)
return self.decoder(features, hidden)
def init_hidden(self, bsz):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, bsz, self.nhid), weight.new_zeros(self.nlayers, bsz, self.nhid))
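Since the training loop is the open part of the question, here is a hedged sketch of one possibility rather than a definitive implementation of the paper. Assumptions not in the original post: a DataLoader named train_loader yielding (imgs, seqs, targets), with imgs being the precomputed CNN feature maps expected by VM and seqs being sequence-first token indices; a vocab_size variable; and fresh hidden states per batch. The loop drives the sub-modules directly because the LM and the Decoder use different hidden sizes, so each needs its own (h_0, c_0). Note also that the Decoder as posted slices out[:, -1, :], which assumes batch-first tensors, so that line may need adjusting for the sequence-first layout used here.
import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Pix2Code(ntoken=vocab_size).to(device)  # vocab_size is assumed to exist
criterion = nn.NLLLoss()  # the Decoder already returns log_softmax
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
num_epochs = 10

for epoch in range(num_epochs):
    model.train()
    total_loss = 0.0
    for imgs, seqs, targets in train_loader:
        imgs, seqs, targets = imgs.to(device), seqs.to(device), targets.to(device)
        bsz = imgs.size(0)

        # Fresh (h_0, c_0) per batch, built by each module's own init_hidden.
        lm_hidden = model.lm.init_hidden(bsz)
        dec_hidden = model.decoder.init_hidden(bsz)

        optimizer.zero_grad()
        img_feats = model.vm(imgs)  # (bsz, 128)
        seq_feats, lm_hidden = model.lm(seqs, lm_hidden)  # (seq_len, bsz, 128)

        # Repeat the image features along the time axis, then concatenate with
        # the per-step sequence features on the feature dimension.
        img_feats = img_feats.unsqueeze(0).expand(seq_feats.size(0), -1, -1)
        features = torch.cat((img_feats, seq_feats), dim=2)  # (seq_len, bsz, 256)

        log_probs, dec_hidden = model.decoder(features, dec_hidden)
        loss = criterion(log_probs, targets)  # targets: next-token indices
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)  # common for LSTMs
        optimizer.step()
        total_loss += loss.item()

    print(f"epoch {epoch}: mean loss {total_loss / len(train_loader):.4f}")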

Freeze sublayers in tensorflow 2

I have a model which is composed of custom layers. Each custom layer contains many tf.keras.layers. The problem is that if I want to freeze those layers after defining my model, the loop:
for i, layer in enumerate(model.layers):
print(i, layer.name)
only prints the "outer" custom layers and not those who exist inside. Is there any way to access the inner layers so I can freeze them?
an example of a custom layer from the official tf docs:
class MLPBlock(layers.Layer):
def __init__(self):
super(MLPBlock, self).__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(1)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.linear_2(x)
x = tf.nn.relu(x)
return self.linear_3(x)
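For reference, a minimal sketch of two ways to reach the nested layers, assuming tf.keras in TF 2.x (where every layer is also a tf.Module); in particular, setting trainable = False on an outer layer propagates recursively to its sublayers:
import tensorflow as tf

# Option 1: freezing the outer custom layers freezes everything nested inside them.
for layer in model.layers:
    layer.trainable = False
print(len(model.trainable_weights))  # -> 0; (re)compile before training further

# Option 2: enumerate every nested layer explicitly and freeze selectively.
for sub in model.submodules:
    if isinstance(sub, tf.keras.layers.Layer):
        print(sub.name, sub.trainable)
Here model stands for the already-built model from the question.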
You can use Keras callbacks. If you want to freeze your first layer after a certain number of epochs, add this callback:
class FreezeCallback(tf.keras.callbacks.Callback):
def __init__(self, n_epochs=10):
super().__init__()
self.n_epochs = n_epochs
def on_epoch_end(self, epoch, logs=None):
if epoch == self.n_epochs:
l = self.model.get_layer('first')
l.trainable = False
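A hedged usage sketch: pass the callback to fit. Two assumptions worth flagging: model.get_layer('first') only searches the model's direct layers, so the layer named 'first' must sit at the top level (otherwise the lookup has to reach into the custom layer), and depending on the TF version the model may need to be recompiled for the trainable change to take effect mid-training.
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(data, labels, epochs=20, batch_size=32,
          callbacks=[FreezeCallback(n_epochs=10)])  # freeze layer 'first' after epoch 10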
What you are doing in your update function is replacing the first Dense() layer with another Dense() layer, this time setting trainable = False.
While this works, I would update the 'updt' function as follows:
def updt(self):
self.dense1.trainable = False
OK, I came up with a solution.
An "update" function must be implemented inside the custom layer, which updates the inner layers so that they become non-trainable.
Here is some sample code:
import tensorflow as tf
import numpy as np
layers = tf.keras.layers
seq_model = tf.keras.models.Sequential
class MDBlock(layers.Layer):
def __init__(self):
super(MDBlock, self).__init__()
self.dense1 = layers.Dense(784, name="first")
self.dense2 = layers.Dense(32, name="second")
self.dense3 = layers.Dense(32, name="third")
self.dense4 = layers.Dense(1, activation='sigmoid', name="outp")
def call(self, inputs):
x = self.dense1(inputs)
x = tf.nn.relu(x)
x = self.dense2(x)
x = tf.nn.relu(x)
x = self.dense3(x)
x = tf.nn.relu(x)
x = self.dense4(x)
return x
def updt(self):
self.dense1.trainable = False
def __str__(self):
return "\nd1:{0}\nd2:{1}\nd3:{2}\nd4:{3}".format(self.dense1.trainable, self.dense2.trainable,
self.dense3.trainable, self.dense4.trainable)
# define layer block
layer = MDBlock()
model = seq_model()
model.add(layers.Input(shape=(784,)))
model.add(layer)
# Use updt function to make layers non-trainable
for i, layer in enumerate(model.layers):
layer.updt()
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# Generate dummy data
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))
# Train the model, iterating on the data in batches of 32 samples
model.fit(data, labels, epochs=10, batch_size=32)
# print block's layers state
for i, layer in enumerate(model.layers):
print(i, layer)