Deploy a locally trained Keras model in Google Cloud ML: why does the input shape change? - tensorflow

I have trained a simple Keras network locally that generates a text sequence using GRUs. The model has an input shape of [1, window_size, number_characters].
I have exported the model as a TensorFlow model and served it with tensorflow_model_server. For prediction, I use an input with shape [1, window_size, number_characters].
When I deploy it to Google Cloud ML, the model expects an input of shape [number_characters, window_size].
Why does the input shape change in Google Cloud ML?
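Not part of the original question, but one way to check what each export actually declares is to load the SavedModel and print its signature (a minimal TF 1.x sketch, pointing export_path at the directories used in the export snippets below):
# Sketch only: load an exported SavedModel and print its declared signatures,
# to compare what the local export and the Cloud ML export expect as input.
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants

export_path = "../export-google-ml/1"  # or the tensorflow_model_server export directory
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph_def = tf.saved_model.loader.load(sess, [tag_constants.SERVING], export_path)
    for sig_name, sig in meta_graph_def.signature_def.items():
        print(sig_name)
        for key, tensor_info in sig.inputs.items():
            print('  input :', key, tensor_info.tensor_shape)
        for key, tensor_info in sig.outputs.items():
            print('  output:', key, tensor_info.tensor_shape)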
Keras network
def create_gru_model(num_chars):
    """
    Define the network.
    :param num_chars: number of characters used in the training process
    :return: model - the network defined
    """
    model = Sequential()
    # Layer 1 .- GRU layer with 200 hidden units
    model.add(GRU(200, input_shape=(window_size, num_chars), return_sequences=True))
    # Layer 2 .- GRU layer with 200 hidden units
    model.add(GRU(200))
    # Layer 3 .- Dense, with num_chars units and softmax activation
    model.add(Dense(num_chars, activation='softmax'))
    return model
Export model to tensorflow_model_server.
if os.path.isdir(export_path):
    shutil.rmtree(export_path)
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'sequence': model.input},
                                  outputs={'scores': model.output})
with K.get_session() as sess:
    builder.add_meta_graph_and_variables(sess=sess,
                                         tags=[tag_constants.SERVING],
                                         signature_def_map={'predict': signature})
    builder.save()
Predict model with tensorflow_model_server.
input_init="pla panfletaria contra as leoninas taxas impostas polo ministro de xustiza actual malia que vulneran"
# Load values
window_size = 100
chars_to_indices, indices_to_chars = load_coded_dictionaries()
number_chars=len(chars_to_indices)
# Clean the text
input_clean=clean_text(input_init.lower())
input_clean = input_clean[:window_size]
# Text to array [1,input_lenght,num_chars]
x_test = np.zeros((1,window_size, number_chars))
for t, char in enumerate(input_clean):
x_test[0, t, chars_to_indices[char]] = 1.
x_test
# Get the array with the probabilities for the next charazter
channel = grpc.insecure_channel("localhost:" + str(9000))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
# Name of the model
request.model_spec.name = 'default'
request.model_spec.signature_name = 'predict'
request.inputs['inputs'].CopyFrom(
tf.contrib.util.make_tensor_proto(
x_test,dtype='float32'))
result=stub.Predict(request)
# Get the charazter from array
test_predict=np.array(result.outputs['outputs'].float_val)
r = np.argmax(test_predict) # predict class of each test input
d = indices_to_chars[r]
Export model to Google Cloud ML
import keras.backend as K
K.set_learning_phase(0)
# Input size of the network; the input text must have the same length
window_size = 100
# Get dictionaries
chars_to_indices, indices_to_chars = load_coded_dictionaries()
number_chars = len(chars_to_indices)
# Regenerate the model
model = create_gru_model(number_chars)
model.load_weights('../model_weights/best_beiras_gru_textdata_weights.hdf5')
# Path to export; 1 is the version,
# we can serve different versions with the same server
export_path = "../export-google-ml/1"
if os.path.isdir(export_path):
    shutil.rmtree(export_path)
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'sequence': model.input},
                                  outputs={'scores': model.output})
with K.get_session() as sess:
    builder.add_meta_graph_and_variables(sess=sess,
                                         tags=[tag_constants.SERVING],
                                         signature_def_map={'serving_default': signature})
    builder.save()
Predict model with Google Cloud ML
input_init="pla panfletaria contra as leoninas taxas impostas polo ministro de xustiza actual malia que vulneran"
# Load values
window_size = 100
chars_to_indices, indices_to_chars = load_coded_dictionaries()
number_chars=len(chars_to_indices)
# Clean the text
input_clean=clean_text(input_init.lower())
input_clean = input_clean[:window_size]
# Text to array [number_chars,window_size]
x_test = np.zeros((number_chars,window_size))
for t, char in enumerate(input_clean):
x_test[ chars_to_indices[char],t] = 1.
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}'.format(project, model)
if version is not None:
name += '/versions/{}'.format(version)
instances={'sequence':x_test.tolist()}
response = service.projects().predict(
name=name,
body={'instances': instances}
).execute()
if 'error' in response:
raise RuntimeError(response['error'])
test_predict=np.array(response['predictions'][0]['scores'])
r = np.argmax(test_predict) # predict class of each test input
indices_to_chars[r]

Related

Keras Bidirectional LSTM seq2seq inference model expects 3 inputs but only receives 1, even though I am passing in 3 inputs

I am creating a language model with a bidirectional LSTM seq2seq model.
I have created the model and trained it successfully:
lstm_units = 100
# Set up embedding layer using pretrained weights
embedding_layer = Embedding(total_words+1, emb_dimension, input_length=max_input_len, weights=[embedding_matrix], name="Embedding")
# Encoder
encoder_input_x = Input(shape=(None,), name="Enc_x_Input")
encoder_embedding_x = embedding_layer(encoder_input_x)
encoder_lstm_x, enc_state_h_fwd, enc_state_c_fwd, enc_state_h_bwd, enc_state_c_bwd = Bidirectional(LSTM(lstm_units, dropout=0.5, return_state=True, name="Enc_LSTM1"), name="Enc_Bi1")(encoder_embedding_x) # pass hidden activation and memory cell states forward
encoder_state_h = Concatenate()([enc_state_h_fwd, enc_state_h_bwd])
encoder_state_c = Concatenate()([enc_state_c_fwd, enc_state_c_bwd])
encoder_states = [encoder_state_h, encoder_state_c] # package states to pass to decoder
# Decoder
decoder_input_x = Input(shape=(None,), name="Dec_x_Input")
decoder_embedding_x = embedding_layer(decoder_input_x)
decoder_lstm_layer = LSTM(lstm_units*2, return_state=True, return_sequences=True, dropout=0.5, name="Dec_LSTM1") # We define an LSTM layer without passing anything in here, as we will need to use this LSTM later.
decoder_lstm_x, _, _ = decoder_lstm_layer(decoder_embedding_x, initial_state=encoder_states) # we pass in encoder states
decoder_dense_layer = TimeDistributed(Dense(total_words+1, activation="softmax", name="Dec_Softmax")) # we set this dense to a variable so we can use it later, as above with the LSTM
decoder_output_x = decoder_dense_layer(decoder_lstm_x)
model = Model(inputs=[encoder_input_x, decoder_input_x], outputs=decoder_output_x)
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
I then set up the inference model:
# Inference Encoder
inf_encoder_model = Model(encoder_input_x, encoder_states) # Here we are creating a model using layers from the model we built earlier.
# The encoder model outputs the encoder_states, ie. the concatenated h and c values from the BLSTM
# Inference Decoder
# Create new inputs for decoder state
inf_dec_state_h_input = Input(shape=(2*lstm_units,), name="Dec_h_state_input") # These must be sized to fit both FWD and BWD h values from the BLSTM
inf_dec_state_c_input = Input(shape=(2*lstm_units,), name="Dec_c_state_input")
inf_dec_state_input = [inf_dec_state_h_input, inf_dec_state_c_input] # package states to pass to decoder
# Decoder LSTM + Dense
inf_decoder_lstm_x, inf_dec_state_h, inf_dec_state_c = decoder_lstm_layer(decoder_embedding_x, initial_state=inf_dec_state_input) # reuse embedding layer from training. We pass in encoder states
inf_decoder_states = [inf_dec_state_h, inf_dec_state_c] # I think when we loop inference, we'll pass these states back in to the input instead of the encoder states
inf_decoder_output = decoder_dense_layer(inf_decoder_lstm_x)
decoder_model = Model([decoder_input_x] + inf_dec_state_input, [inf_decoder_output] + inf_decoder_states) # we reuse the decoder_input_x from the training model
The decoder model for inference is set up to take the decoder inputs + the c and h states which are output from the encoder.
When running the inference loop using this code:
states = inf_encoder_model.predict(x_inputs[700])
# Generate empty target sequence of length 1.
target_seq = np.zeros((max_output_len, 1), dtype=int)
# Populate the first character of target sequence with the start character.
target_seq[0, 0] = 4 # 4 is the start of sequence token used during training
# Get prediction
prediction, h, c = decoder_model.predict([target_seq] + states)
it gives me a long error that ends with:
ValueError: Layer Dec_LSTM1 expects 3 input(s), but it received 1 input tensors. Inputs received: [<tf.Tensor 'model_16/Embedding/embedding_lookup/Identity_1:0' shape=(None, 1, 100) dtype=float32>]
The encoder states seem to be fine; a list containing 2 arrays, the h and c values, each with shape (60, 200). The target_seq is an array of shape (1, 60). x_inputs[700] is training data, also of shape (1, 60).
Why is the model.predict line suggesting I am giving it 1 input tensor when I am giving it a list containing 3 arrays?
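Not part of the original post, but a quick way to see the mismatch is to compare the inputs the inference decoder model was actually built with against the arrays being fed to predict():
# Hypothetical sanity check (not from the question): print the input tensors
# Keras recorded for the inference decoder and the shapes being passed in.
print(decoder_model.inputs)            # should list the decoder input plus the h and c state inputs
print(target_seq.shape)                # first element of the list fed to predict()
print([s.shape for s in states])       # the two state arrays returned by the encoder model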

Are there any tools/libraries that can convert tensorflow lstm model to .mlmodel format to run in iOS app

I have a simple TensorFlow model that consists of LSTM layers, such as tf.contrib.rnn.LSTMBlockCell or tf.keras.layers.LSTM (I can provide the sample code if needed). I want to run the model in an iOS app. However, I have looked at several websites that say that there is presently no way to convert and run a TensorFlow model containing LSTM layers in iOS apps.
I have tried these tools/libraries to convert the TensorFlow model to the .mlmodel format (or .tflite format):
1. Swift for Tensorflow
2. Tensorflow Lite for iOS
3. tfcoreml
However, these tools do not seem to be able to convert a model with LSTM layers to the .mlmodel format. They do allow custom layers to be added, but I don't know how to add an LSTM custom layer.
Am I wrong in saying that there is no support for running a TensorFlow LSTM model in iOS apps? If yes, then please guide me on how to include the model in an iOS app. Is there any other tool/library that can be used to convert it to the .mlmodel format? If no, are there any plans to include TensorFlow support for iOS in the future?
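As an illustration only (not something from the question): one possible route, assuming the same network can be rebuilt with Keras LSTM layers and saved as an .h5 file, is coremltools' Keras converter, which handled LSTM layers; all file and feature names below are placeholders:
# Hypothetical sketch: rebuild the network with Keras LSTM layers, save it as
# an .h5 file, and convert it with coremltools' Keras converter.
import coremltools
from keras.models import Sequential
from keras.layers import LSTM, Dense

model = Sequential()
model.add(LSTM(128, input_shape=(28, 28)))   # timesteps=28, num_input=28 as in the question
model.add(Dense(10, activation='softmax'))
# ... train or load weights here ...
model.save('lstm_mnist.h5')                  # placeholder file name

coreml_model = coremltools.converters.keras.convert(
    'lstm_mnist.h5',
    input_names=['image_sequence'],          # placeholder feature names
    output_names=['digit_probabilities'])
coreml_model.save('lstm_mnist.mlmodel')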
Model
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib.rnn import *

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Summary parameters
logs_path = "logs/"

# Training Parameters
learning_rate = 0.001
training_steps = 1000
batch_size = 128
display_step = 200

# Network Parameters
num_input = 28     # MNIST data input (img shape: 28*28)
timesteps = 28     # timesteps
num_hidden = 128   # hidden layer num of features
num_classes = 10   # MNIST total classes (0-9 digits)

# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}

def RNN(x, weights, biases):
    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, timesteps, 1)
    # Define a lstm cell with tensorflow
    lstm_cell = rnn.LSTMBlockCell(num_hidden, forget_bias=1.0)
    # lstm_cell = tf.keras.layers.LSTMCell(num_hidden, unit_forget_bias=True)
    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

logits = RNN(X, weights, biases)
with tf.name_scope('Model'):
    prediction = tf.nn.softmax(logits, name="prediction_layer")
with tf.name_scope('Loss'):
    # Define loss and optimizer
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name="loss_layer"),
        name="reduce_mean_loss")
with tf.name_scope('SGD'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate, name="Gradient_Descent")
    train_op = optimizer.minimize(loss_op, name="minimize_layer")
with tf.name_scope('Accuracy'):
    # Evaluate model (with test logits, for dropout to be disabled)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1), name="correct_pred_layer")
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="reduce_mean_acc_layer")

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Create a summary to monitor cost tensor
tf.summary.scalar("loss", loss_op)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", accuracy)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver()
save_path = ""
model_save = "model.ckpt"

# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    # op to write logs to Tensorboard
    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    for step in range(1, training_steps + 1):
        total_batch = int(mnist.train.num_examples / batch_size)
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc, summary = sess.run([loss_op, accuracy, merged_summary_op],
                                          feed_dict={X: batch_x, Y: batch_y})
            # Write logs at every iteration
            summary_writer.add_summary(summary, step * total_batch)
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
    saver.save(sess, model_save)
    tf.train.write_graph(sess.graph_def, save_path, 'save_graph.pbtxt')
    # print(sess.graph.get_operations())
    print("Optimization Finished!")
    print("Run the command line:\n"
          "--> tensorboard --logdir=logs/ "
          "\nThen open http://0.0.0.0:6006/ into your web browser")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
Code to generate the frozen model
import tensorflow as tf
import numpy as np
from tensorflow.python.tools import freeze_graph
save_path = ""
model_name = "test_model_tf_keras_layers_lstm"
input_graph_path = save_path + "save_graph.pbtxt"
checkpoint_path = save_path + "model.ckpt"
input_saver_def_path = ""
input_binary = False
output_node_names = "Model/prediction_layer" #output node's name. Should match to that mentioned in the code
restore_op_name = 'save/restore_all'
filename_tensor_name = 'save/const:0'
output_frozen_graph_name = save_path + 'frozen_model' + '.pb' # name of .pb file that one would like to give
clear_devices = True
freeze_graph.freeze_graph(input_graph_path, input_saver_def_path, input_binary, checkpoint_path, output_node_names, restore_op_name, filename_tensor_name, output_frozen_graph_name, clear_devices, "")
print("Model Freezed")
Conversion Code to generate .mlmodel format file
import tfcoreml
coreml_model = tfcoreml.convert(tf_model_path='frozen_model_test_model_tf_keras_layers_lstm.pb',
                                mlmodel_path='test_model.mlmodel',
                                output_feature_names=['Model/prediction_layer:0'],
                                add_custom_layers=True)
coreml_model.save("test_model.mlmodel")
Error message shown with
lstm_cell = rnn.BasicLSTMCell(num_hidden, name = "lstm_cell")
Value Error: Split op case not handled. Input shape = [1, 512], output shape = [1, 128]
Error message shown with
lstm_cell = rnn.LSTMBlockCell(num_hidden, name = "lstm_cell")
InvalidArgumentError (see above for traceback): No OpKernel was registered to support Op 'LSTMBlockCell' used by node rnn/lstm_cell/LSTMBlockCell (defined at /anaconda2/lib/python2.7/site-packages/tfcoreml/_tf_coreml_converter.py:153) with these attrs: [forget_bias=1, use_peephole=false, cell_clip=-1, T=DT_FLOAT]
Registered devices: [CPU]
Registered kernels:
<no registered kernels>
[[node rnn/lstm_cell/LSTMBlockCell (defined at /anaconda2/lib/python2.7/site-packages/tfcoreml/_tf_coreml_converter.py:153) ]]
I expect that the frozen tensorflow model can be converted to .mlmodel format.

Multiple Graphs in one Tensorflow Session

I am currently trying to implement code that will allow my drone to navigate indoors using TensorFlow. I need to run two models in a single session.
One is for the main navigation: a retrained Inception V3 model responsible for classifying hallway images and deciding whether to move forward, left, or right. The second is an object tracking model that tracks objects and computes their relative distance to the camera.
I don't know how to use multiple graphs in one session, so I tried to create a separate session inside the loop, which creates a big overhead and causes my script to run at 0 FPS.
def inception_model():
    # Graph for the InceptionV3 Model
    graph = load_graph('inception_v3_frozen/inception_v3_2016_08_28_frozen.pb')
    with tf.Session(graph=graph) as sess:
        while camera.isOpened():
            ok, img = camera.read()
            cv.imwrite("frame_temp.jpeg", img)
            t = read_tensor_from_image('frame_temp.jpeg')
            input_layer = "input"
            output_layer = "InceptionV3/Predictions/Reshape_1"
            input_name = "import/" + input_layer
            output_name = "import/" + output_layer
            input_operation = graph.get_operation_by_name(input_name)
            output_operation = graph.get_operation_by_name(output_name)
            results = sess.run(output_operation.outputs[0], {
                input_operation.outputs[0]: t
            })
            results = np.squeeze(results)
            top_k = results.argsort()[-5:][::-1]
            for i in top_k:
                print(labels[i], results[i])

# inception_model()
with tf.Session(graph=object_detection_graph) as sess:
    while camera.isOpened():
        ok, img = camera.read()
        cv.imwrite("frame_temp.jpeg", img)
        img = np.array(img)
        rows = img.shape[0]
        cols = img.shape[1]
        inp = cv.resize(img, (299, 299))
        # inception_model()
        # # Graph for the InceptionV3 Model
        # graph = load_graph('inception_v3_frozen/inception_v3_2016_08_28_frozen.pb')
        # t = read_tensor_from_image('frame_temp.jpeg')
        # input_layer = "input"
        # output_layer = "InceptionV3/Predictions/Reshape_1"
        # input_name = "import/" + input_layer
        # output_name = "import/" + output_layer
        # input_operation = graph.get_operation_by_name(input_name)
        # output_operation = graph.get_operation_by_name(output_name)
        # with tf.Session(graph=graph) as sess:
        #     results = sess.run(output_operation.outputs[0], {
        #         input_operation.outputs[0]: t
        #     })
        #     results = np.squeeze(results)
        #     top_k = results.argsort()[-5:][::-1]
        #     for i in top_k:
        #         print(labels[i], results[i])
        inp = inp[:, :, [2, 1, 0]]  # BGR2RGB
        # Run the model
        out = sess.run([object_detection_graph.get_tensor_by_name('num_detections:0'),
                        object_detection_graph.get_tensor_by_name('detection_scores:0'),
                        object_detection_graph.get_tensor_by_name('detection_boxes:0'),
                        object_detection_graph.get_tensor_by_name('detection_classes:0')],
                       feed_dict={'image_tensor:0': inp.reshape(1, inp.shape[0], inp.shape[1], 3)})
You don't have to create new sessions on each iteration. Create them once and keep calling their run methods. TensorFlow supports multiple active sessions.
Another option is to have a single Graph object and a single Session. The graph can contain both of your models as disconnected sub-graphs. When you ask for a tensor in Session.run(), TensorFlow will run only what is necessary to compute the tensor you asked for, so the other sub-graph will not run (though it will take some, probably very small, time to prune it away).
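A minimal sketch of the first suggestion (one session per graph, created once outside the loop), reusing load_graph, object_detection_graph, camera, cv, labels and read_tensor_from_image from the question:
# Sketch only: build both graphs and both sessions once, then reuse them every frame.
import numpy as np
import tensorflow as tf

inception_graph = load_graph('inception_v3_frozen/inception_v3_2016_08_28_frozen.pb')
inception_sess = tf.Session(graph=inception_graph)
detection_sess = tf.Session(graph=object_detection_graph)

input_op = inception_graph.get_operation_by_name("import/input")
output_op = inception_graph.get_operation_by_name("import/InceptionV3/Predictions/Reshape_1")

while camera.isOpened():
    ok, img = camera.read()
    cv.imwrite("frame_temp.jpeg", img)
    t = read_tensor_from_image('frame_temp.jpeg')
    # Classification: reuse the session created above, no new session per frame.
    results = np.squeeze(inception_sess.run(output_op.outputs[0],
                                            {input_op.outputs[0]: t}))
    top_k = results.argsort()[-5:][::-1]
    for i in top_k:
        print(labels[i], results[i])
    # Detection: same idea with the second session.
    inp = cv.resize(np.array(img), (299, 299))[:, :, [2, 1, 0]]  # BGR2RGB
    out = detection_sess.run(
        [object_detection_graph.get_tensor_by_name('num_detections:0'),
         object_detection_graph.get_tensor_by_name('detection_scores:0'),
         object_detection_graph.get_tensor_by_name('detection_boxes:0'),
         object_detection_graph.get_tensor_by_name('detection_classes:0')],
        feed_dict={'image_tensor:0': inp.reshape(1, inp.shape[0], inp.shape[1], 3)})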

logits and labels must be same size logits_size

Hi, I used my own dataset to train the model, but I get the error mentioned below. My dataset has 124 classes and the labels are 0 to 123, the images are 60*60 grayscale, the batch size is 10, and the result is:
lables.eval() --> [ 1 101 101 103 103 103 103 100 102 1] -- len(lables.eval())= 10
orginal pic size -- > (?, 60, 60, 1)
First convolutional layer (?, 30, 30, 32)
Second convolutional layer. (?, 15, 15, 64)
flatten. (?, 14400)
dense .1 (?, 2048)
dense .2 (?, 124)
error
tensorflow.python.framework.errors_impl.InvalidArgumentError: logits and labels must have the same first dimension, got logits shape [40,124] and labels shape [10]
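Not from the original post, but a hypothetical sanity check on the reported shapes: the logits' first dimension is 40 while the labels' is 10, i.e. exactly 4 times the batch size, which could happen if each stored image holds 4 times as many values as a 60*60*1 image (for example a 4-channel PNG), so the reshape in model_fn would produce 4 "images" per record:
# Hypothetical check (the path is a placeholder, not from the question).
from matplotlib.pyplot import imread
img = imread('some_training_image.png')
print(img.shape, img.size)
# A 60x60 grayscale image should give img.size == 3600.
# If img.size == 4 * 3600 (e.g. shape (60, 60, 4)), then
# tf.reshape(x, [-1, 60, 60, 1]) turns a batch of 10 records into 40 samples,
# which would match "logits shape [40,124] and labels shape [10]".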
code
def model_fn(features, labels, mode, params):
    # Reference to the tensor named "image" in the input-function.
    x = features["image"]
    # The convolutional layers expect 4-rank tensors
    # but x is a 2-rank tensor, so reshape it.
    net = tf.reshape(x, [-1, img_size, img_size, num_channels])
    # First convolutional layer.
    net = tf.layers.conv2d(inputs=net, name='layer_conv1',
                           filters=32, kernel_size=3,
                           padding='same', activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
    # Second convolutional layer.
    net = tf.layers.conv2d(inputs=net, name='layer_conv2',
                           filters=64, kernel_size=3,
                           padding='same', activation=tf.nn.relu)
    net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
    # Flatten to a 2-rank tensor.
    net = tf.contrib.layers.flatten(net)
    # Eventually this should be replaced with:
    # net = tf.layers.flatten(net)
    # First fully-connected / dense layer.
    # This uses the ReLU activation function.
    net = tf.layers.dense(inputs=net, name='layer_fc1',
                          units=2048, activation=tf.nn.relu)
    # Second fully-connected / dense layer.
    # This is the last layer so it does not use an activation function.
    net = tf.layers.dense(inputs=net, name='layer_fc_2',
                          units=num_classes)
    # Logits output of the neural network.
    logits = net
    y_pred = tf.nn.softmax(logits=logits)
    y_pred_cls = tf.argmax(y_pred, axis=1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        spec = tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=y_pred_cls)
    else:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                       logits=logits)
        loss = tf.reduce_mean(cross_entropy)
        optimizer = tf.train.AdamOptimizer(learning_rate=params["learning_rate"])
        train_op = optimizer.minimize(
            loss=loss, global_step=tf.train.get_global_step())
        metrics = {
            "accuracy": tf.metrics.accuracy(labels, y_pred_cls)
        }
        spec = tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=metrics)
    return spec
These labels come from here, via TFRecords:
def input_fn(filenames, train, batch_size=10, buffer_size=2048):
    # Args:
    # filenames:   Filenames for the TFRecords files.
    # train:       Boolean whether training (True) or testing (False).
    # batch_size:  Return batches of this size.
    # buffer_size: Read buffers of this size. The random shuffling
    #              is done on the buffer, so it must be big enough.
    # Create a TensorFlow Dataset-object which has functionality
    # for reading and shuffling data from TFRecords files.
    dataset = tf.data.TFRecordDataset(filenames=filenames)
    # Parse the serialized data in the TFRecords files.
    # This returns TensorFlow tensors for the image and labels.
    dataset = dataset.map(parse)
    if train:
        # If training then read a buffer of the given size and
        # randomly shuffle it.
        dataset = dataset.shuffle(buffer_size=buffer_size)
        # Allow infinite reading of the data.
        num_repeat = None
    else:
        # If testing then don't shuffle the data.
        # Only go through the data once.
        num_repeat = 1
    # Repeat the dataset the given number of times.
    dataset = dataset.repeat(num_repeat)
    # Get a batch of data with the given size.
    dataset = dataset.batch(batch_size)
    # Create an iterator for the dataset and the above modifications.
    iterator = dataset.make_one_shot_iterator()
    # Get the next batch of images and labels.
    images_batch, labels_batch = iterator.get_next()
    # The input-function must return a dict wrapping the images.
    x = {'image': images_batch}
    y = labels_batch
    print(x, ' - ', y.get_shape())
    return x, y
I generate the labels via this code; for example, image name = math-1, label = 1:
def get_lable_and_image(path):
    lbl = []
    img = []
    for filename in glob.glob(os.path.join(path, '*.png')):
        img.append(filename)
        lable = filename[41:].split()[0].split('-')[1]
        lbl.append(int(lable))
    lables = np.array(lbl)
    images = np.array(img)
    # print(images[1], lables[1])
    return images, lables
I pass the images and labels to create the TFRecords:
def convert(image_paths, labels, out_path):
    # Args:
    # image_paths  List of file-paths for the images.
    # labels       Class-labels for the images.
    # out_path     File-path for the TFRecords output file.
    print("Converting: " + out_path)
    # Number of images. Used when printing the progress.
    num_images = len(image_paths)
    # Open a TFRecordWriter for the output-file.
    with tf.python_io.TFRecordWriter(out_path) as writer:
        # Iterate over all the image-paths and class-labels.
        for i, (path, label) in enumerate(zip(image_paths, labels)):
            # Print the percentage-progress.
            print_progress(count=i, total=num_images - 1)
            # Load the image-file using matplotlib's imread function.
            img = imread(path)
            # Convert the image to raw bytes.
            img_bytes = img.tostring()
            # Create a dict with the data we want to save in the
            # TFRecords file. You can add more relevant data here.
            data = {
                'image': wrap_bytes(img_bytes),
                'label': wrap_int64(label)
            }
            # Wrap the data as TensorFlow Features.
            feature = tf.train.Features(feature=data)
            # Wrap again as a TensorFlow Example.
            example = tf.train.Example(features=feature)
            # Serialize the data.
            serialized = example.SerializeToString()
            # Write the serialized data to the TFRecords file.
            writer.write(serialized)

How to use tf.contrib.seq2seq.Helper for non-embedding data?

I'm trying to use the tf.contrib.seq2seq module to do forecasting on some data (just float32 vectors), but all the examples I found using the seq2seq module from TensorFlow are for translation and therefore use embeddings.
I'm struggling to understand exactly what tf.contrib.seq2seq.Helper is doing for the Seq2Seq architecture and how I can use the CustomHelper in my case.
This is what I've done for now:
import tensorflow as tf
from tensorflow.python.layers import core as layers_core

input_seq_len = 15   # Sequence length as input
input_dim = 1        # Nb of features in input
output_seq_len = forecast_len = 20  # horizon length for forecasting
output_dim = 1       # nb of features to forecast
encoder_units = 200  # nb of units in each cell for the encoder
decoder_units = 200  # nb of units in each cell for the decoder
attention_units = 100
batch_size = 8

graph = tf.Graph()
with graph.as_default():
    learning_ = tf.placeholder(tf.float32)
    with tf.variable_scope('Seq2Seq'):
        # Placeholder for encoder input
        enc_input = tf.placeholder(tf.float32, [None, input_seq_len, input_dim])
        # Placeholder for decoder output - Targets
        target = tf.placeholder(tf.float32, [None, output_seq_len, output_dim])

        ### BUILD THE ENCODER
        # Build RNN cell
        encoder_cell = tf.nn.rnn_cell.BasicLSTMCell(encoder_units)
        initial_state = encoder_cell.zero_state(batch_size, dtype=tf.float32)
        # Run Dynamic RNN
        # encoder_outputs: [batch_size, seq_size, num_units]
        # encoder_state: [batch_size, num_units]
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell, enc_input, initial_state=initial_state)

        ## Attention layer
        attention_mechanism_bahdanau = tf.contrib.seq2seq.BahdanauAttention(
            num_units=attention_units,   # depth of query mechanism
            memory=encoder_outputs,      # hidden states to attend (output of RNN)
            normalize=False,             # normalize energy term
            name='BahdanauAttention')
        attention_mechanism_luong = tf.contrib.seq2seq.LuongAttention(
            num_units=encoder_units,
            memory=encoder_outputs,
            scale=False,
            name='LuongAttention')

        ### BUILD THE DECODER
        # Simple Dense layer to project from rnn_dim to the desired output_dim
        projection = layers_core.Dense(output_dim, use_bias=True, name="output_projection")
        helper = tf.contrib.seq2seq.TrainingHelper(target, sequence_length=[output_seq_len for _ in range(batch_size)])
        ## This is where I don't really know what to do in my case; is this function changing my data into [GO, data, END]?
        decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(decoder_units)
        attention_cell = tf.contrib.seq2seq.AttentionWrapper(
            cell=decoder_cell,
            attention_mechanism=attention_mechanism_luong,  # Instance of AttentionMechanism
            attention_layer_size=attention_units,
            name="attention_wrapper")
        initial_state = attention_cell.zero_state(batch_size=batch_size, dtype=tf.float32)
        initial_state = initial_state.clone(cell_state=encoder_state)
        decoder = tf.contrib.seq2seq.BasicDecoder(attention_cell, initial_state=initial_state, helper=helper, output_layer=projection)
        outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=decoder)

        # Loss function:
        loss = 0.5 * tf.reduce_sum(tf.square(outputs[0] - target), -1)
        loss = tf.reduce_mean(loss, 1)
        loss = tf.reduce_mean(loss)
        # Optimizer
        optimizer = tf.train.AdamOptimizer(learning_).minimize(loss)
I understood that the training state and the inference state are quite different for the seq2seq architecture, but I don't know how to use the Helpers from the module in order to distinguish the two.
I'm using this module because it's quite useful for attention layers.
How can I use the Helper in order to create a ['GO', [input_sequence]] input for the decoder?
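Not part of the original question, but for continuous (non-embedding) data a common pattern at inference time was tf.contrib.seq2seq.InferenceHelper, which feeds the decoder's own projected output at step t back in as the input at step t+1, so no embeddings or GO/END tokens are needed. A minimal sketch, assuming TF 1.x and reusing attention_cell, projection, initial_state, batch_size, output_dim and output_seq_len from the code above:
# Sketch only: an inference-time helper for real-valued sequences.
# A zero vector is used as the "GO" input for the first decoding step (an assumption).
start_inputs = tf.zeros([batch_size, output_dim], dtype=tf.float32)
inference_helper = tf.contrib.seq2seq.InferenceHelper(
    sample_fn=lambda outputs: outputs,          # no sampling: the projected output is the "sample"
    sample_shape=[output_dim],                  # per-step sample shape, without the batch dimension
    sample_dtype=tf.float32,
    start_inputs=start_inputs,                  # fed to the decoder at step 0
    end_fn=lambda sample_ids: tf.zeros([batch_size], dtype=tf.bool))  # never signal "done"; length is capped below
inference_decoder = tf.contrib.seq2seq.BasicDecoder(
    attention_cell,
    helper=inference_helper,
    initial_state=initial_state,
    output_layer=projection)
inference_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
    inference_decoder, maximum_iterations=output_seq_len)
# inference_outputs.rnn_output has shape [batch_size, output_seq_len, output_dim]
For training, TrainingHelper simply feeds the tensor it is given step by step; it does not insert GO or END markers itself, so a GO vector would have to be prepended to the targets manually if desired.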