How can I create a `tf.data.Dataset` for a `tf.keras` model that accepts multiple inputs of various shapes? - tensorflow

I have a tf.keras model that needs to accept multiple inputs of multiple shapes. My goal is to build it in such a way that I can train and evaluate it easily using its fit and evaluate API.
So far, the model is built as follows:
class MultipleLSTM(Model):

    def __init__(self, lstm_dims=128, name='multi_lstm', **kwargs):
        super(MultipleLSTM, self).__init__(name=name)

        # initialize encoders for every attribute
        self.encoders = []
        for key, value in kwargs.items():
            self.encoders.append(self._create_encoder(lstm_dims, value))

        # initialize the rest of the network layers
        self.concat = Concatenate(axis=0)
        self.conv_1 = Conv2D(6, 4, activation='relu')
        self.flatten = Flatten()
        self.dense = Dense(128, activation='relu')
        self.out = Dense(1, activation='sigmoid')

    def call(self, inputs):
        x_1 = self.encoders[0](inputs[0])
        x_2 = self.encoders[1](inputs[1])
        x_3 = self.encoders[2](inputs[2])
        x_4 = self.encoders[3](inputs[3])
        x = self.concat([x_1, x_2, x_3, x_4])

        # fix the shape for the convolutions
        x = tf.expand_dims(x, axis=0)
        x = tf.expand_dims(x, axis=3)

        x = self.conv_1(x)
        x = self.flatten(x)
        x = self.dense(x)
        x = self.out(x)

        return x

    def _create_encoder(self, lstm_dims, conf):
        with tf.name_scope(conf['name']) as scope:
            encoder = tf.keras.Sequential(name=scope)
            encoder.add(Embedding(conf['vocab'],
                                  conf['embed_dim'],
                                  input_length=conf['input_length']))
            encoder.add(Bidirectional(LSTM(lstm_dims)))
        return encoder
There are four different inputs, text sentences of different lengths, that are fed to four different Embedding and LSTM layers (encoders). Then the outputs of those layers are concatenated to create a single tensor that is forwarded to the subsequent layers.
To train this network, I'm passing as input a list of lists for the different tokenized sentences. The label is just a number, 0 or 1 (binary classification). For example, an input could be:
x = [[1, 2, 3, 4],
     [2, 3, 5],
     [3, 5, 6, 7],
     [1, 5, 7]]
y = 0
For now, I have implemented a custom loop that takes such input and trains the network:
def train(data, model, loss_fn, optimizer, metric, epochs=10, print_every=50):
    for epoch in range(epochs):
        print(f'Start of epoch {epoch+1}')
        for step, (x_batch, y_batch) in enumerate(data):
            with GradientTape() as tape:
                output = model(x_batch)
                loss = loss_fn(y_batch, output)
            grads = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(grads, model.trainable_weights))
            metric(loss)
            if step % print_every == 0:
                print(f'step {step}: mean loss = {metric.result()}')
But this prevents me from exploiting the easy-to-use tf.keras API to fit and evaluate the model, or even split the dataset into train and test sets. Thus, the question is: How can I create a tf.data.Dataset from such x's and y's and pass it to the fit function of tf.keras?

You can use the functional API of Keras to do so. Here is the link to the Keras documentation on multi-input/multi-output models, if you want it: Multi-input and multi-output models

You can directly pass the different inputs as a list to the fit and evaluate methods.
model.fit([X_train[:,0], X_train[:,1]], y_train, ...)
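If you specifically want a tf.data.Dataset (as the question asks), here is a minimal sketch, assuming the four tokenized inputs have already been padded to fixed per-input lengths; the array names, shapes, and batch size below are illustrative, not from the original post. Grouping the inputs into a tuple makes each dataset element look like ((x1, x2, x3, x4), y), which Keras then unpacks as the model's multi-input.
import numpy as np
import tensorflow as tf

# Hypothetical, already-padded token matrices, one per encoder.
num_samples = 32
x1 = np.random.randint(1, 100, (num_samples, 4))
x2 = np.random.randint(1, 100, (num_samples, 3))
x3 = np.random.randint(1, 100, (num_samples, 4))
x4 = np.random.randint(1, 100, (num_samples, 3))
y = np.random.randint(0, 2, (num_samples, 1))

# Each element becomes ((x1_i, x2_i, x3_i, x4_i), y_i).
dataset = tf.data.Dataset.from_tensor_slices(((x1, x2, x3, x4), y))
dataset = dataset.shuffle(num_samples).batch(8)

# model.compile(optimizer='adam', loss='binary_crossentropy')
# model.fit(dataset, epochs=10)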

Related

NN on tensorflow doesn't train

I am trying to train a simple TensorFlow network on a simple model, but for some reason it doesn't learn anything. Am I making a mistake somewhere?
X, Y = read_data(file_name)
# CONSTRUCT GRAPH
x_t = tf.placeholder(shape=[None, X.shape[1]], dtype=tf.float32)
y_t = tf.placeholder(shape=[None,], dtype=tf.float32)
hidden_1 = tf.layers.dense(x_t, 50, activation=tf.nn.sigmoid)
hidden_2 = tf.layers.dense(hidden_1, 50, activation=tf.nn.sigmoid)
output = tf.layers.dense(hidden_2, 1, activation=tf.nn.sigmoid)
# DEFINE LOSS AND OPTIMIZER
loss = tf.reduce_mean(tf.square(output - y_t))
GD_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_step = GD_optimizer.minimize(loss)
# BATCH SIZE
BATCH_SIZE = 20
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(15000):
        rand_indices = np.random.choice(X.shape[0], size=BATCH_SIZE)
        x_batch = X[rand_indices, :]
        y_batch = Y[rand_indices]
        _, temp_loss = sess.run([train_step, loss], feed_dict={x_t: x_batch, y_t: y_batch})
        print(temp_loss)
According to my understanding of your dataset description, the target column Y is a float (a real-valued number) that can be in any range, not necessarily within the [0, 1] interval.
On the other hand, because you use a sigmoid activation for the last layer of your model, the prediction values will always be in the [0, 1] range.
I would suggest not using a sigmoid activation in the last layer, unless your Y values are also in the [0, 1] range.
So, modify your code so that it becomes:
output = tf.layers.dense(hidden_2, 1, activation=None)
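Alternatively, if you would rather keep the sigmoid output, a minimal sketch of the other option mentioned above is to scale the targets into [0, 1] first; the variable names here are illustrative.
import numpy as np

# Min-max scale the targets into [0, 1] so they match the sigmoid output range.
# Compute the scaling constants from the training targets only.
y_min, y_max = Y.min(), Y.max()
Y_scaled = (Y - y_min) / (y_max - y_min)

# After training, predictions can be mapped back to the original scale:
# y_pred_original = y_pred * (y_max - y_min) + y_min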

Re-use speechT example with LSTM network

Starting from the example at https://github.com/timediv/speechT, I'm trying to adapt it to use an LSTM network, but I keep failing. I have tried many combinations, but I always get an error such as "Input must be a sequence". I need to add an LSTM network to the example for speech recognition, and after trying for a couple of weeks I am still stuck on the coding problem. If anyone can provide an example of using an LSTM network with this sample, that would be great.
class InputBatchLoader(BaseInputLoader):

    def __init__(self, input_size, batch_size, data_generator_creator, max_steps=None):
        super().__init__(input_size)
        self.batch_size = batch_size
        self.data_generator_creator = data_generator_creator
        self.steps_left = max_steps

        with tf.device("/cpu:0"):
            # Define input and label placeholders
            self.inputs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='inputs')
            self.sequence_lengths = tf.placeholder(tf.int32, [batch_size], name='sequence_lengths')
            self.labels = tf.sparse_placeholder(tf.int32, name='labels')

            # Queue for inputs and labels
            self.queue = tf.FIFOQueue(dtypes=[tf.float32, tf.int32, tf.string],
                                      capacity=100)

            # queues do not support sparse tensors yet, we need to serialize...
            serialized_labels = tf.serialize_many_sparse(self.labels)
            self.enqueue_op = self.queue.enqueue([self.inputs,
                                                  self.sequence_lengths,
                                                  serialized_labels])
class Wav2LetterLSTMModel(SpeechModel):  #Add Sep 14, 2017 to create LSTM model

    def __init__(self, input_loader: BaseInputLoader, input_size: int, num_classes: int):
        super().__init__(input_loader, input_size, num_classes)

    def _create_network(self, num_classes):
        cellsize = 256
        num_layers = 3
        inputs = self.inputs
        lstm_cell = rnn.BasicLSTMCell(cellsize, forget_bias=1.0)
        outputs, states = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
        return tf.transpose(outputs, (1, 0, 2))

def create_default_model(flags, input_size: int, speech_input: BaseInputLoader) -> SpeechModel:
    model = Wav2LetterLSTMModel(input_loader=speech_input,
                                input_size=input_size,
                                num_classes=speecht.vocabulary.SIZE + 1)  #Add Sep 14, 2017, to use LSTM model

    # TODO how can we restore only selected variables so we do not need to always create the full network?
    if flags.command == 'train':
        model.add_training_ops(learning_rate=flags.learning_rate,
                               learning_rate_decay_factor=flags.learning_rate_decay_factor,
                               max_gradient_norm=flags.max_gradient_norm,
                               momentum=flags.momentum)
        model.add_decoding_ops()
    elif flags.command == 'export':
        model.add_training_ops()
        model.add_decoding_ops()
    else:
        model.add_training_ops()
        model.add_decoding_ops(language_model=flags.language_model,
                               lm_weight=flags.lm_weight,
                               word_count_weight=flags.word_count_weight,
                               valid_word_count_weight=flags.valid_word_count_weight)

    model.finalize(log_dir=flags.log_dir,
                   run_name=flags.run_name,
                   run_type=flags.run_type)

    return model
In the end, I used this:
XT = tf.transpose(inputs, [1, 0, 2])
XR = tf.reshape(XT, [-1, self.input_size])
X_split = tf.split(XR, cellsize, 0)
lstm = rnn.BasicLSTMCell(cellsize, forget_bias=1.0, state_is_tuple=True)
outputs, _states = rnn.static_rnn(lstm, X_split, dtype=tf.float32)

TensorFlow: How to embed float sequences to fixed size vectors?

I am looking for methods to embed variable-length sequences of float values into fixed-size vectors. The input is formatted as follows:
[f1,f2,f3,f4]->[f1,f2,f3,f4]->[f1,f2,f3,f4]-> ... -> [f1,f2,f3,f4]
[f1,f2,f3,f4]->[f1,f2,f3,f4]->[f1,f2,f3,f4]->[f1,f2,f3,f4]-> ... -> [f1,f2,f3,f4]
...
[f1,f2,f3,f4]-> ... -> ->[f1,f2,f3,f4]
Each line is a variable-length sequence, with a maximum length of 60. Each unit in a sequence is a tuple of 4 float values. I have already padded with zeros to bring all sequences to the same length.
The following architecture seems to solve my problem if I use the output as the same as the input; I need the thought vector in the center as the embedding for the sequences.
In TensorFlow, I have found two candidate methods: tf.contrib.legacy_seq2seq.basic_rnn_seq2seq and tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq.
However, these two methods seem to be meant for NLP problems, where the input must be discrete values for words.
So, are there other functions to solve my problem?
All you need is an RNN, not the seq2seq model, since seq2seq comes with an additional decoder, which is unnecessary in your case.
An example code:
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
input_size = 4
max_length = 60
hidden_size=64
output_size = 4
x = tf.placeholder(tf.float32, shape=[None, max_length, input_size], name='x')
seqlen = tf.placeholder(tf.int64, shape=[None], name='seqlen')
lstm_cell = rnn.BasicLSTMCell(hidden_size, forget_bias=1.0)
outputs, states = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=x, sequence_length=seqlen, dtype=tf.float32)
encoded_states = states[-1]
W = tf.get_variable(
    name='W',
    shape=[hidden_size, output_size],
    dtype=tf.float32,
    initializer=tf.random_normal_initializer())
b = tf.get_variable(
    name='b',
    shape=[output_size],
    dtype=tf.float32,
    initializer=tf.random_normal_initializer())
z = tf.matmul(encoded_states, W) + b
results = tf.sigmoid(z)
###########################
## cost computing and training components goes here
# e.g.
# targets = tf.placeholder(tf.float32, shape=[None, input_size], name='targets')
# cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=z))
# optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(cost)
###############################
init = tf.global_variables_initializer()
batch_size = 4
data_in = np.zeros((batch_size, max_length, input_size), dtype='float32')
data_in[0, :4, :] = np.random.rand(4, input_size)
data_in[1, :6, :] = np.random.rand(6, input_size)
data_in[2, :20, :] = np.random.rand(20, input_size)
data_in[3, :, :] = np.random.rand(60, input_size)
data_len = np.asarray([4, 6, 20, 60], dtype='int64')
with tf.Session() as sess:
    sess.run(init)
    #########################
    # training process goes here
    #########################
    res = sess.run(results,
                   feed_dict={
                       x: data_in,
                       seqlen: data_len})
    print(res)
To encode a sequence into a fixed-length vector you typically use recurrent neural networks (RNNs) or convolutional neural networks (CNNs).
If you use a recurrent neural network you can use the output at the last time step (the last element in your sequence). This corresponds to the thought vector in your question. Have a look at tf.nn.dynamic_rnn. dynamic_rnn requires you to specify the type of RNN cell you want to use. tf.contrib.rnn.LSTMCell and tf.contrib.rnn.GRUCell are the most common.
If you want to use CNNs, you need 1-dimensional convolutions. To build them you need tf.layers.conv1d and tf.layers.max_pooling1d.
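A minimal sketch of the CNN variant, assuming the same padded [batch, 60, 4] input as above; the filter counts, kernel sizes, and the global-max-pool plus dense head at the end are illustrative choices, not prescribed by the answer.
import tensorflow as tf

max_length, input_size, embed_size = 60, 4, 64

x = tf.placeholder(tf.float32, shape=[None, max_length, input_size], name='x')

# 1-D convolutions over the time axis, then pooling to shrink the sequence.
conv1 = tf.layers.conv1d(x, filters=32, kernel_size=5, padding='same', activation=tf.nn.relu)
pool1 = tf.layers.max_pooling1d(conv1, pool_size=2, strides=2)
conv2 = tf.layers.conv1d(pool1, filters=64, kernel_size=5, padding='same', activation=tf.nn.relu)

# Global max pooling over time yields a fixed-size vector regardless of sequence length.
pooled = tf.reduce_max(conv2, axis=1)                # shape [batch, 64]
embedding = tf.layers.dense(pooled, embed_size)      # final fixed-size embedding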
I have found a solution to my problem, using the following encoder-decoder architecture. The LSTM layers below encode the series x1, x2, ..., xn. The last output (the green one in the original figure) is duplicated as many times as there are input steps and used as the input to the decoding LSTM layers above. The TensorFlow code is as follows:
series_input = tf.placeholder(tf.float32, [None, conf.max_series, conf.series_feature_num])
print("Encode input Shape", series_input.get_shape())

# encoding layer
encode_cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.BasicLSTMCell(conf.rnn_hidden_num, reuse=False) for _ in range(conf.rnn_layer_num)]
)
encode_output, _ = tf.nn.dynamic_rnn(encode_cell, series_input, dtype=tf.float32, scope='encode')
print("Encode output Shape", encode_output.get_shape())

# last output
encode_output = tf.transpose(encode_output, [1, 0, 2])
last = tf.gather(encode_output, int(encode_output.get_shape()[0]) - 1)

# duplicate the last output of the encoding layer
decoder_input = tf.stack([last for _ in range(conf.max_series)], axis=1)
print("Decoder input shape", decoder_input.get_shape())

# decoding layer
decode_cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.BasicLSTMCell(conf.series_feature_num, reuse=False) for _ in range(conf.rnn_layer_num)]
)
decode_output, _ = tf.nn.dynamic_rnn(decode_cell, decoder_input, dtype=tf.float32, scope='decode')
print("Decode output", decode_output.get_shape())

# Loss Function
loss = tf.losses.mean_squared_error(labels=series_input, predictions=decode_output)
print("Loss", loss)

how to stack LSTM layers using TensorFlow

What I have is the following, which I believe is a network with one hidden LSTM layer:
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# Network Parameters
n_input = 13
n_steps = 10
n_hidden = 512
n_classes = 13

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
However, I am trying to build an LSTM network using TensorFlow to predict power consumption. I have been looking around to find a good example, but I could not find any model with 2 hidden LSTM layers. Here's the model that I would like to build:
1 input layer,
1 output layer,
2 hidden LSTM layers (with 512 neurons in each),
time step(sequence length): 10
Could anyone guide me on how to build this using TensorFlow (from defining the weights, building the input shape, training, predicting, use of the optimizer or cost function, etc.)? Any help would be much appreciated.
Thank you so much in advance!
Here is how I do it in a translation model with GRU cells. You can just replace the GRU with an LSTM. It is really easy: just use tf.nn.rnn_cell.MultiRNNCell with a list of the multiple cells it should wrap. In the code below I am manually unrolling it, but you can pass it to tf.nn.dynamic_rnn or tf.nn.rnn as well.
y = input_tensor
with tf.variable_scope('encoder') as scope:
    rnn_cell = rnn.MultiRNNCell([rnn.GRUCell(1024) for _ in range(3)])
    state = tf.zeros((BATCH_SIZE, rnn_cell.state_size))
    output = [None] * TIME_STEPS
    for t in reversed(range(TIME_STEPS)):
        y_t = tf.reshape(y[:, t, :], (BATCH_SIZE, -1))
        output[t], state = rnn_cell(y_t, state)
        scope.reuse_variables()
    y = tf.pack(output, 1)  # tf.pack was renamed to tf.stack in TF 1.0
First you need some placeholders to put your training data (one batch)
x_input = tf.placeholder(tf.float32, [batch_size, truncated_series_length, 1])
y_output = tf.placeholder(tf.float32, [batch_size, truncated_series_length, 1])
An LSTM needs a state, which consists of two components, the hidden state and the cell state; there is a very good guide here: https://arxiv.org/pdf/1506.00019.pdf. For every layer in the LSTM you have one cell state and one hidden state.
The problem is that TensorFlow stores this in an LSTMStateTuple, which you cannot feed through a placeholder. So you need to store it in a Tensor and then unpack it into a tuple:
state_placeholder = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
l = tf.unpack(state_placeholder, axis=0)
rnn_tuple_state = tuple(
    [tf.nn.rnn_cell.LSTMStateTuple(l[idx][0], l[idx][1])
     for idx in range(num_layers)]
)
Then you can use the built-in Tensorflow API to create the stacked LSTM layer.
cell = tf.nn.rnn_cell.LSTMCell(state_size, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell([cell]*num_layers, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(cell, x_input, initial_state=rnn_tuple_state)
From here you continue with the outputs to calculate the logits and then a loss with respect to y_output.
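A minimal sketch of that step, assuming a regression-style target shaped like the y_output placeholder above; the projection size, loss choice, and optimizer are illustrative, not part of the answer.
# outputs has shape [batch_size, truncated_series_length, state_size].
# Project every time step down to a single value to match y_output.
flat_outputs = tf.reshape(outputs, [-1, state_size])
logits = tf.layers.dense(flat_outputs, 1)
logits = tf.reshape(logits, [batch_size, truncated_series_length, 1])

loss = tf.losses.mean_squared_error(labels=y_output, predictions=logits)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)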
Then you run each batch with the sess.run command, using truncated backpropagation (there is a good explanation here: http://r2rt.com/styles-of-truncated-backpropagation.html):
init_state = np.zeros((num_layers, 2, batch_size, state_size))
...current_state... = sess.run([...state...], feed_dict={x_input:batch_in, state_placeholder:current_state ...})
current_state = np.array(current_state)
You will have to convert the state to a numpy array before feeding it again.
Perhaps it is better to use a library like TFLearn or Keras instead?
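For reference, a minimal sketch of the same two-hidden-layer, 512-unit architecture in Keras, matching the sizes from the question; the optimizer and loss here are illustrative choices, not from the answer.
from tensorflow import keras

n_steps, n_input, n_hidden, n_classes = 10, 13, 512, 13

model = keras.Sequential([
    # return_sequences=True so the second LSTM receives the full sequence
    keras.layers.LSTM(n_hidden, return_sequences=True, input_shape=(n_steps, n_input)),
    keras.layers.LSTM(n_hidden),
    keras.layers.Dense(n_classes),
])
model.compile(optimizer='adam', loss='mse')
# model.fit(x_train, y_train, batch_size=128, epochs=10)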

Minimal RNN example in tensorflow

Trying to implement a minimal toy RNN example in tensorflow.
The goal is to learn a mapping from the input data to the target data, similar to this wonderful concise example in theanets.
Update: We're getting there. The only part remaining is to make it converge (and less convoluted). Could someone help to turn the following into running code or provide a simple example?
import tensorflow as tf
from tensorflow.python.ops import rnn_cell

init_scale = 0.1
num_steps = 7
num_units = 7
input_data = [1, 2, 3, 4, 5, 6, 7]
target = [2, 3, 4, 5, 6, 7, 7]
#target = [1,1,1,1,1,1,1] #converges, but not what we want

batch_size = 1

with tf.Graph().as_default(), tf.Session() as session:
    # Placeholder for the inputs and target of the net
    # inputs = tf.placeholder(tf.int32, [batch_size, num_steps])
    input1 = tf.placeholder(tf.float32, [batch_size, 1])
    inputs = [input1 for _ in range(num_steps)]
    outputs = tf.placeholder(tf.float32, [batch_size, num_steps])

    gru = rnn_cell.GRUCell(num_units)
    initial_state = state = tf.zeros([batch_size, num_units])
    loss = tf.constant(0.0)

    # setup model: unroll
    for time_step in range(num_steps):
        if time_step > 0: tf.get_variable_scope().reuse_variables()
        step_ = inputs[time_step]
        output, state = gru(step_, state)
        loss += tf.reduce_sum(abs(output - target))  # all norms work equally well? NO!
    final_state = state

    optimizer = tf.train.AdamOptimizer(0.1)  # CONVERGEs sooo much better
    train = optimizer.minimize(loss)  # let the optimizer train

    numpy_state = initial_state.eval()
    session.run(tf.initialize_all_variables())
    for epoch in range(10):  # now
        for i in range(7):  # feed fake 2D matrix of 1 byte at a time ;)
            feed_dict = {initial_state: numpy_state, input1: [[input_data[i]]]}  # no
            numpy_state, current_loss, _ = session.run([final_state, loss, train], feed_dict=feed_dict)
            print(current_loss)  # hopefully going down, always stuck at 189, why!?
I think there are a few problems with your code, but the idea is right.
The main issue is that you're using a single tensor for inputs and outputs, as in:
inputs = tf.placeholder(tf.int32, [batch_size, num_steps]).
In TensorFlow the RNN functions take a list of tensors (because num_steps can vary in some models). So you should construct inputs like this:
inputs = [tf.placeholder(tf.int32, [batch_size, 1]) for _ in xrange(num_steps)]
Then you need to take care of the fact that your inputs are int32s, but an RNN cell works on float vectors - that's what embedding_lookup is for.
And finally you'll need to adapt your feed_dict to fill in the input list.
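A minimal sketch of those two points, embedding the int32 inputs and feeding the list of placeholders; the names vocab_size, embed_dim, and input_batches are hypothetical, chosen only for illustration.
# Hypothetical sizes for illustration only.
vocab_size, embed_dim = 10, 7

# Embed the int32 token placeholders into float vectors the RNN cell can consume.
embedding = tf.get_variable("embedding", [vocab_size, embed_dim])
embedded_inputs = [tf.squeeze(tf.nn.embedding_lookup(embedding, inp), axis=1)
                   for inp in inputs]   # each becomes [batch_size, embed_dim]

# Feed one numpy batch per placeholder in the input list
# (input_batches is a hypothetical list with one array per time step).
feed_dict = {ph: batch for ph, batch in zip(inputs, input_batches)}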
I think the ptb tutorial is a reasonable place to look, but if you want an even more minimal example of an out-of-the-box RNN you can take a look at some of the rnn unit tests, e.g., here.
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/kernel_tests/rnn_test.py#L164