Add metadata to your RNN model after an embedding layer with shared weights - tensorflow

I have an embedding matrix with shared weights for text threads, after which I want to add metadata to my model. However, adding a new layer using the same function I use to initialize the other model layers gives me dimension errors. Can someone tell me how I can proceed?
def build_post_submodel(arch='cnn', isEmbedding=True):
    """
    Creates and returns a function from Input to a chain of layers with shared
    weights for the embedding matrix of the different posts in our thread.
    """
    if isEmbedding:
        layers = [Embedding(vocab_size, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH)]
        if arch == 'cnn':
            layers.extend([
                Conv1D(128, 5, activation='relu'),
                MaxPooling1D(50),
                Flatten()
            ])
        elif arch == 'average':
            layers.extend([
                Masking(mask_value=WORD_PADDING_VALUE),
                # Average()
                Lambda(lambda x: K.mean(x, axis=1), output_shape=lambda s: (s[0], s[2]))
            ])
        else:
            raise ValueError('Unknown post architecture: %s' % arch)
    else:
        layers = [Conv1D(128, 5, activation='relu'),
                  MaxPooling1D(50),
                  Flatten()]

    def composed_layers(x):
        for layer in layers:
            x = layer(x)
        return x

    return composed_layers
post_submodel = build_post_submodel(post_arch, True)

# create an input for each post
input_tensors = []
encoded_posts = []
for i in range(use_number_of_posts):
    post_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    input_tensors.append(post_input)
    encoded_post = post_submodel(post_input)
    encoded_posts.append(encoded_post)

# trying to add metadata to my model
post_submodel = build_post_submodel(post_arch, False)
metadata_input = Input(shape=np.array(metadata_train).shape)
input_tensors.append(metadata_input)
encoded_post = post_submodel(metadata_input)
encoded_posts.append(encoded_post)

merged_vector = concatenate(encoded_posts, axis=-1)
preds = Dense(num_classes, activation='softmax')(merged_vector)

model = Model(input_tensors, preds)
model.summary()
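For reference, here is a minimal sketch of one way the metadata branch could be wired instead (my illustration, not the original code, assuming metadata_train is a 2D array of shape (num_samples, num_features)). The Conv1D/MaxPooling1D stack expects 3D input of shape (batch, steps, channels), and Input takes the per-sample shape without the batch dimension, so a flat per-sample feature vector is usually easier to handle with a Dense branch:

num_metadata_features = np.array(metadata_train).shape[1]

# per-sample shape only, no batch dimension
metadata_input = Input(shape=(num_metadata_features,), dtype='float32')
encoded_metadata = Dense(64, activation='relu')(metadata_input)

input_tensors.append(metadata_input)
encoded_posts.append(encoded_metadata)

merged_vector = concatenate(encoded_posts, axis=-1)
preds = Dense(num_classes, activation='softmax')(merged_vector)
model = Model(input_tensors, preds)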

Related

" ValueError: Expecting KerasTensor which is from tf.keras.Input()". Error in prediction with dropout function

I am trying to predict uncertainty in a regression problem using dropout during testing, as per Yarin Gal's article. I created a class using Keras's backend function, as provided by this Stack Overflow question's answer. The class takes a NN model as input and randomly drops neurons during testing to give a stochastic estimate rather than a deterministic output for time-series forecasting.
I create a simple encoder-decoder model as shown below for the forecasting, with 0.1 dropout during training:
input_sequence = Input(shape=(lookback, train_x.shape[2]))
encoder = LSTM(128, return_sequences=False)(input_sequence)
r_vec = RepeatVector(forward_pred)(encoder)
decoder = LSTM(128, return_sequences=True, dropout=0.1)(r_vec)  # maybe use dropout=0.1
output = TimeDistributed(Dense(train_y.shape[2], activation='linear'))(decoder)
# optimiser = optimizers.Adam(clipnorm=1)
enc_dec_model = Model(input_sequence, output)
enc_dec_model.compile(loss="mean_squared_error",
                      optimizer="adam",
                      metrics=['mean_squared_error'])
enc_dec_model.summary()
After that, I define and call the KerasDropoutPrediction class.
# Define the class:
class KerasDropoutPrediction(object):
    def __init__(self, model):
        self.f = K.function(
            [model.layers[0].input,
             K.learning_phase()],
            [model.layers[-1].output])

    def predict(self, x, n_iter=10):
        result = []
        for _ in range(n_iter):
            result.append(self.f([x, 1]))
        result = np.array(result).reshape(n_iter, x.shape[0], x.shape[1]).T
        return result

# Call the object:
kdp = KerasDropoutPrediction(enc_dec_model)
y_pred_do = kdp.predict(x_test, n_iter=100)
y_pred_do_mean = y_pred_do.mean(axis=1)
However, in the line
kdp = KerasDropoutPrediction(enc_dec_model), when I call the LSTM model,
I get the following error message, which says the input has to be a Keras tensor. Can anyone help me with this error?
Error Message:
ValueError: Found unexpected instance while processing input tensors for keras functional model. Expecting KerasTensor which is from tf.keras.Input() or output from keras layer call(). Got: 0
To activate dropout at inference time, you simply have to specify training=True (TF > 2.0) in the layer of interest (the last LSTM layer in your case).
with training=False
inp = Input(shape=(10, 1))
x = LSTM(1, dropout=0.3)(inp, training=False)
m = Model(inp, x)
# m.compile(...)
# m.fit(...)

X = np.random.uniform(0, 1, (1, 10, 1))
output = []
for i in range(0, 100):
    output.append(m.predict(X))  # always the same
with training=True
inp = Input(shape=(10, 1))
x = LSTM(1, dropout=0.3)(inp, training=True)
m = Model(inp, x)
# m.compile(...)
# m.fit(...)

X = np.random.uniform(0, 1, (1, 10, 1))
output = []
for i in range(0, 100):
    output.append(m.predict(X))  # always different
In your example, this becomes:
input_sequence = Input(shape=(lookback, train_x.shape[2]))
encoder = LSTM(128, return_sequences=False)(input_sequence)
r_vec = RepeatVector(forward_pred)(encoder)
decoder = LSTM(128, return_sequences=True, dropout=0.1)(r_vec, training=True)
output = TimeDistributed(Dense(train_y.shape[2], activation='linear'))(decoder)

enc_dec_model = Model(input_sequence, output)
enc_dec_model.compile(
    loss="mean_squared_error",
    optimizer="adam",
    metrics=['mean_squared_error']
)
enc_dec_model.fit(train_x, train_y, epochs=10, batch_size=32)
and the KerasDropoutPrediction becomes:
class KerasDropoutPrediction(object):
    def __init__(self, model):
        self.model = model

    def predict(self, X, n_iter=10):
        result = []
        for _ in range(n_iter):
            result.append(self.model.predict(X))
        result = np.array(result)
        return result

kdp = KerasDropoutPrediction(enc_dec_model)
y_pred_do = kdp.predict(test_x, n_iter=100)
y_pred_do_mean = y_pred_do.mean(axis=0)
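The stacked predictions have the n_iter stochastic passes along axis 0, so a simple spread-based uncertainty estimate can be read off the same array (my addition; the 1.96-sigma band is just an illustrative choice, not part of the answer):

# Point forecast and per-point uncertainty from the Monte Carlo dropout runs
y_pred_do_mean = y_pred_do.mean(axis=0)
y_pred_do_std = y_pred_do.std(axis=0)

# e.g. an approximate 95% band around the mean forecast
lower = y_pred_do_mean - 1.96 * y_pred_do_std
upper = y_pred_do_mean + 1.96 * y_pred_do_std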

Freeze sublayers in tensorflow 2

I have a model which is composed of custom layers. Each custom layer contains many tf.keras.layers. The problem is that if I want to freeze those layers after defining my model, the loop:
for i, layer in enumerate(model.layers):
    print(i, layer.name)
only prints the "outer" custom layers and not those that exist inside. Is there any way to access the inner layers so I can freeze them?
An example of a custom layer from the official TF docs:
class MLPBlock(layers.Layer):
    def __init__(self):
        super(MLPBlock, self).__init__()
        self.linear_1 = Linear(32)
        self.linear_2 = Linear(32)
        self.linear_3 = Linear(1)

    def call(self, inputs):
        x = self.linear_1(inputs)
        x = tf.nn.relu(x)
        x = self.linear_2(x)
        x = tf.nn.relu(x)
        return self.linear_3(x)
You can use Keras callbacks. If you want to freeze your first layer after a certain number of epochs, add this callback:
class FreezeCallback(tf.keras.callbacks.Callback):
    def __init__(self, n_epochs=10):
        super().__init__()
        self.n_epochs = n_epochs

    def on_epoch_end(self, epoch, logs=None):
        if epoch == self.n_epochs:
            l = self.model.get_layer('first')
            l.trainable = False
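For reference, the callback would be attached to training roughly like this (a sketch; x_train, y_train and the epoch counts are placeholders, not from the original answer):

# Hypothetical training call: freeze the layer named 'first' after n_epochs.
# Note: changes to `trainable` are normally only picked up after re-compiling,
# so behaviour can vary between Keras versions.
model.fit(x_train, y_train,
          epochs=20, batch_size=32,
          callbacks=[FreezeCallback(n_epochs=10)])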
What you are doing in your update function is replacing the first Dense() layer with another Dense() layer, this time setting trainable=False.
While this works, I would update the 'update' function as follows:
def updt(self):
    self.dense1.trainable = False
OK, I came up with a solution.
An "update" function must be implemented inside the custom layer, which updates the inner layers so that they become non-trainable.
Here is some sample code:
import tensorflow as tf
import numpy as np

layers = tf.keras.layers
seq_model = tf.keras.models.Sequential

class MDBlock(layers.Layer):
    def __init__(self):
        super(MDBlock, self).__init__()
        self.dense1 = layers.Dense(784, name="first")
        self.dense2 = layers.Dense(32, name="second")
        self.dense3 = layers.Dense(32, name="third")
        self.dense4 = layers.Dense(1, activation='sigmoid', name="outp")

    def call(self, inputs):
        x = self.dense1(inputs)
        x = tf.nn.relu(x)
        x = self.dense2(x)
        x = tf.nn.relu(x)
        x = self.dense3(x)
        x = tf.nn.relu(x)
        x = self.dense4(x)
        return x

    def updt(self):
        self.dense1.trainable = False

    def __str__(self):
        return "\nd1:{0}\nd2:{1}\nd3:{2}\nd4:{3}".format(self.dense1.trainable, self.dense2.trainable,
                                                         self.dense3.trainable, self.dense4.trainable)

# define layer block
layer = MDBlock()

model = seq_model()
model.add(layers.Input(shape=(784,)))
model.add(layer)

# Use updt function to make layers non-trainable
for i, layer in enumerate(model.layers):
    layer.updt()

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Generate dummy data
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))

# Train the model, iterating on the data in batches of 32 samples
model.fit(data, labels, epochs=10, batch_size=32)

# print block's layers state
for i, layer in enumerate(model.layers):
    print(i, layer)
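As a side note not in the original answers: in TF 2 a Keras layer is a tf.Module, so the nested layers can also be reached without a custom updt() method, either by setting trainable on the outer block (which propagates to its sub-layers' weights) or by walking submodules. A rough sketch:

# Option 1: freeze everything tracked inside the custom block in one go
layer.trainable = False

# Option 2: inspect / freeze the nested layers explicitly via tf.Module.submodules
for sub in model.submodules:
    if isinstance(sub, tf.keras.layers.Dense):
        sub.trainable = False
        print(sub.name, "trainable:", sub.trainable)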

How to define and use a custom loss function in keras

I have a model in Keras. The model uses binary cross-entropy (log loss). However, I want to create my own custom binary cross-entropy log loss for it.
Here is my model:
def get_model(train, num_users, num_items, layers=[20, 10, 5, 2]):
    num_layer = len(layers)  # Number of layers in the MLP
    user_matrix = K.constant(getTrainMatrix(train))
    item_matrix = K.constant(getTrainMatrix(train).T)

    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    user_rating = Lambda(lambda x: tf.gather(user_matrix, tf.to_int32(x)))(user_input)
    item_rating = Lambda(lambda x: tf.gather(item_matrix, tf.to_int32(x)))(item_input)
    user_rating = Reshape((num_items, ))(user_rating)
    item_rating = Reshape((num_users, ))(item_rating)

    MLP_Embedding_User = Dense(layers[0]//2, activation="linear", name='user_embedding')
    MLP_Embedding_Item = Dense(layers[0]//2, activation="linear", name='item_embedding')

    user_latent = MLP_Embedding_User(user_rating)
    item_latent = MLP_Embedding_Item(item_rating)

    # The 0-th layer is the concatenation of embedding layers
    vector = concatenate([user_latent, item_latent])

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer=initializers.lecun_normal(),
                       name='prediction')(vector)

    model_ = Model(inputs=[user_input, item_input],
                   outputs=prediction)
    return model_
Here is the call to the compile function.
model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
Now my question is: how do I define a custom binary cross-entropy loss for it?
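Not from the original post, but as a reference for what a custom loss looks like in Keras: a loss is just a function of (y_true, y_pred) that returns per-sample losses, which can then be passed to compile(). A minimal sketch reproducing binary cross-entropy by hand (the clipping with K.epsilon() is an assumption to avoid log(0)):

from keras import backend as K

def custom_binary_crossentropy(y_true, y_pred):
    # Clip predictions to avoid log(0), then apply the usual BCE formula
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1.0 - eps)
    bce = -(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred))
    return K.mean(bce, axis=-1)

# Use it exactly like a built-in loss:
model.compile(optimizer=Adam(lr=learning_rate), loss=custom_binary_crossentropy)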

I can't get the Keras fit_generator to work with mixed (image and number) input

I've been breaking my head on this for 3 days now.
I followed mainly this link to create my own data generator. But one way or another I'm doing something wrong and I can't figure out why. My error is:
ValueError: Error when checking input: expected dense_4_input to have 2 dimensions, but got array with shape (5, 128, 128, 3)
The network for the number:
def create_mlp(dim, regress=False):
    # define our MLP network
    model = Sequential()
    model.add(Dense(8, input_dim=dim, activation="relu"))
    model.add(Dense(4, activation="relu"))
    # check to see if the regression node should be added
    if regress:
        model.add(Dense(1, activation="linear"))
    return model
The CNN for the image:
def create_cnn(inputshape, filters=(16, 32, 64), regress=True):
    chanDim = -1
    # define the model input
    inputs = Input(shape=inputshape)
    # loop over the number of filters
    for (i, f) in enumerate(filters):
        # if this is the first CONV layer then set the input
        # appropriately
        if i == 0:
            x = inputs
        # CONV => RELU => BN => POOL
        x = Conv2D(f, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    # flatten the volume, then FC => RELU => BN => DROPOUT
    x = Flatten()(x)
    x = Dense(16)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(axis=chanDim)(x)
    x = Dropout(0.5)(x)
    # apply another FC layer, this one to match the number of nodes
    # coming out of the MLP
    x = Dense(4)(x)
    x = Activation("relu")(x)
    # check to see if the regression node should be added
    if regress:
        x = Dense(1, activation="linear")(x)
    # construct the CNN
    model = Model(inputs, x)
    # return the CNN
    return model
My own generator:
def aux_generator(img="todo", aux_input="todo", batch_size=3):
while True:
# Select files (paths/indices) for the batch
# todo make this random
img_path, gridnum, batch_output = get_batch_path()
batch_input_img = []
batch_input_sattelite = []
# Read in each input, perform preprocessing and get labels
for input_path in img_path:
input_img = get_input_image(input_path)
input = preprocess_input(image=input_img)
batch_input_img += [input]
for GridNum in gridnum:
# append is not good!
batch_input_sattelite.append(get_input_sattelite(GridNum))
# Return a tuple of (input,output) to feed the network
batch_x1 = np.array(batch_input_img)
batch_x2 = np.array(batch_input_sattelite)
batch_y = np.array(batch_output)
print("image shape : ", batch_x1.shape) #(5, 128, 128, 3)
print("Aux shape: ", batch_x2.shape, batch_x2) #(5,)
yield [batch_x1, batch_x2], batch_y
def get_batch_path():
# use the df we produced in downloadMpas to know where the images are and what their NO2 concentration is
img_info_df = pd.read_csv(r"Small/mappingTest.csv", delimiter=',', header=None,
names=['GridNum', 'id', 'score', 'lat', 'lon'])
img_info_df = img_info_df[img_info_df.score != "score"]
# the keras network needs float for the score (not object which is default when he reads in)
img_info_df = img_info_df.astype({"GridNum": 'float64', "id": 'object', "score": 'float64'})
return img_info_df['id'].head(n=5), img_info_df['GridNum'].head(n=5), img_info_df['score'].head(n=5)
def get_input_image(path):
# get image
img = image.load_img(r"Small/" + path)
img = image.img_to_array(img)
# get the corresponding value of the sattelite data
return img
def get_input_sattelite(GridNum):
sattelite_no2 = sattelite_df[sattelite_df['GridNum'] == GridNum]['sattelite'].values[0]
print("sattelite no2:", sattelite_no2)
return sattelite_no2
def preprocess_input(image):
# do whatever we want to the images
return (image)
The main:
sattelite_df = pd.read_csv(r"Small/sattelite.csv", delimiter=',', header=None,
                           names=['GridNum', 'id', 'score', 'lat', 'lon', 'sattelite'])

input_img_shape = (128, 128, 3)
input_aux_shape = (1)

img_model = create_cnn(input_img_shape)
aux_model = create_mlp(input_aux_shape, regress=False)

combinedInput = concatenate([aux_model.output, img_model.output])
# our final FC layer head will have two dense layers, the final one
# being our regression head
x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="linear")(x)

# our final model will accept categorical/numerical data on the MLP
# input and images on the CNN input, outputting a single value (the
# predicted price of the house)
model = Model(inputs=[aux_model.input, img_model.input], outputs=x)

opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)

batch_size = 2
early = EarlyStopping(monitor='val_loss', patience=3, verbose=1, restore_best_weights=True)
ImageFile.LOAD_TRUNCATED_IMAGES = True

model.fit_generator(
    aux_generator(batch_size=batch_size),
    steps_per_epoch=10 // batch_size,
    epochs=2,
    validation_data=aux_generator(batch_size=3),
    validation_steps=20 // batch_size,
    callbacks=[early])
Any help is welcome, as I don't know what I'm doing wrong.
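One detail worth checking (my observation, not part of the original post): the model is built with inputs=[aux_model.input, img_model.input], but the generator yields [batch_x1, batch_x2] with the image batch first, so the Dense branch receives the (5, 128, 128, 3) array, which matches the reported error. A sketch with the yield order aligned to the model's input order:

# In aux_generator: auxiliary (numeric) input first, image input second,
# matching Model(inputs=[aux_model.input, img_model.input], outputs=x)
yield [batch_x2, batch_x1], batch_y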

Re-use speechT example with LSTM network

Starting from the example at https://github.com/timediv/speechT, I'm trying to adapt it to use an LSTM network, but I keep failing. I have tried many combinations but I always get an error, e.g. "Input must be sequence" or similar. I need to add an LSTM network to the example for speech recognition, and after trying for a couple of weeks I am still stuck on this coding problem. If anyone could provide an example of using an LSTM network with this sample, that would be great.
class InputBatchLoader(BaseInputLoader):

    def __init__(self, input_size, batch_size, data_generator_creator, max_steps=None):
        super().__init__(input_size)
        self.batch_size = batch_size
        self.data_generator_creator = data_generator_creator
        self.steps_left = max_steps

        with tf.device("/cpu:0"):
            with tf.device("/cpu:0"):
                # Define input and label placeholders
                self.inputs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='inputs')
                self.sequence_lengths = tf.placeholder(tf.int32, [batch_size], name='sequence_lengths')
                self.labels = tf.sparse_placeholder(tf.int32, name='labels')

                # Queue for inputs and labels
                self.queue = tf.FIFOQueue(dtypes=[tf.float32, tf.int32, tf.string],
                                          capacity=100)

                # queues do not support sparse tensors yet, we need to serialize...
                serialized_labels = tf.serialize_many_sparse(self.labels)
                self.enqueue_op = self.queue.enqueue([self.inputs,
                                                      self.sequence_lengths,
                                                      serialized_labels])
class Wav2LetterLSTMModel(SpeechModel):  # Added Sep 14, 2017 to create LSTM model

    def __init__(self, input_loader: BaseInputLoader, input_size: int, num_classes: int):
        super().__init__(input_loader, input_size, num_classes)

    def _create_network(self, num_classes):
        cellsize = 256
        num_layers = 3
        inputs = self.inputs
        lstm_cell = rnn.BasicLSTMCell(cellsize, forget_bias=1.0)
        outputs, states = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
        return tf.transpose(outputs, (1, 0, 2))

def create_default_model(flags, input_size: int, speech_input: BaseInputLoader) -> SpeechModel:
    model = Wav2LetterLSTMModel(input_loader=speech_input,
                                input_size=input_size,
                                num_classes=speecht.vocabulary.SIZE + 1)  # Added Sep 14, 2017, to use LSTM model

    # TODO how can we restore only selected variables so we do not need to always create the full network?
    if flags.command == 'train':
        model.add_training_ops(learning_rate=flags.learning_rate,
                               learning_rate_decay_factor=flags.learning_rate_decay_factor,
                               max_gradient_norm=flags.max_gradient_norm,
                               momentum=flags.momentum)
        model.add_decoding_ops()
    elif flags.command == 'export':
        model.add_training_ops()
        model.add_decoding_ops()
    else:
        model.add_training_ops()
        model.add_decoding_ops(language_model=flags.language_model,
                               lm_weight=flags.lm_weight,
                               word_count_weight=flags.word_count_weight,
                               valid_word_count_weight=flags.valid_word_count_weight)

    model.finalize(log_dir=flags.log_dir,
                   run_name=flags.run_name,
                   run_type=flags.run_type)

    return model
In the end I used this:
XT = tf.transpose(inputs, [1, 0, 2])
XR = tf.reshape(XT, [-1, self.input_size])
X_split = tf.split(XR, cellsize, 0)
lstm = rnn.BasicLSTMCell(cellsize, forget_bias=1.0, state_is_tuple=True)
outputs, _states = rnn.static_rnn(lstm, X_split, dtype=tf.float32)
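For comparison (my addition, not from the original post): rnn.static_rnn expects a Python list of per-time-step tensors, which is the usual source of the "Input must be sequence" error, whereas tf.nn.dynamic_rnn takes the 3-D (batch, time, features) tensor directly and can use the sequence_lengths placeholder defined in InputBatchLoader. A rough TF 1.x-style sketch inside _create_network, assuming self.sequence_lengths is exposed from the input loader:

def _create_network(self, num_classes):
    cellsize = 256
    # self.inputs has shape (batch, time, input_size), as defined in InputBatchLoader
    lstm_cell = rnn.BasicLSTMCell(cellsize, forget_bias=1.0, state_is_tuple=True)

    # dynamic_rnn unrolls over the variable time dimension at run time,
    # so no tf.split into a fixed-length list is needed
    outputs, states = tf.nn.dynamic_rnn(lstm_cell, self.inputs,
                                        sequence_length=self.sequence_lengths,
                                        dtype=tf.float32)

    # time-major output, mirroring the transpose in the posted _create_network
    return tf.transpose(outputs, (1, 0, 2))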