Create SavedModel for BERT - tensorflow

I'm using this Colab for a BERT model.
In the last cells, in order to make predictions, we have:
def getPrediction(in_sentences):
    labels = ["Negative", "Positive"]
    input_examples = [run_classifier.InputExample(guid="", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, "" is just a dummy label
    input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)
    predictions = estimator.predict(predict_input_fn)
    return [(sentence, prediction['probabilities'], labels[prediction['labels']]) for sentence, prediction in zip(in_sentences, predictions)]

pred_sentences = [
    "That movie was absolutely awful",
    "The acting was a bit lacking",
    "The film was creative and surprising",
    "Absolutely fantastic!"
]
predictions = getPrediction(pred_sentences)
I want to create a SavedModel to be used with TF Serving. How do I create a SavedModel for this model?
Normally I would define the following:
def serving_input_fn():
    """Create serving input function to be able to serve predictions later
    using provided inputs
    :return:
    """
    feature_placeholders = {
        'sentence': tf.placeholder(tf.string, [None]),
    }
    return tf.estimator.export.ServingInputReceiver(feature_placeholders,
                                                    feature_placeholders)
latest_ckpt = tf.train.latest_checkpoint(OUTPUT_DIR)
last_eval = estimator.evaluate(input_fn=test_input_fn, steps=None, checkpoint_path=latest_ckpt)
# Export the model to GCS for serving.
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
exporter.export(estimator, OUTPUT_DIR, latest_ckpt, last_eval, is_the_final_export=True)
I'm not sure how to define my tf.estimator.export.ServingInputReceiver.

If you look at the create_model function in the notebook, it takes some arguments. These are the features that will be passed to the model.
You need to update the serving_input_fn function to include them.
def serving_input_fn():
    feature_spec = {
        "input_ids": tf.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),
        "input_mask": tf.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),
        "segment_ids": tf.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),
        "label_ids": tf.FixedLenFeature([], tf.int64)
    }
    serialized_tf_example = tf.placeholder(dtype=tf.string,
                                           shape=[None],
                                           name='input_example_tensor')
    receiver_tensors = {'example': serialized_tf_example}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
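Once serving_input_fn is defined, exporting the SavedModel follows the standard Estimator pattern. A minimal sketch (TF 1.x), reusing the estimator and OUTPUT_DIR from the notebook; the commented-out flag is only needed if the notebook's estimator is a TPUEstimator, which is an assumption on my part:
# Export a SavedModel directory for TF Serving (hedged sketch, not from the notebook).
# estimator._export_to_tpu = False  # only if the estimator is a TPUEstimator (assumption)
export_dir = estimator.export_savedmodel(OUTPUT_DIR, serving_input_fn)
print('SavedModel written to', export_dir)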

Related

Use TFRecord to feed a multi-input neural network

I have a dataset in TFRecord format that includes images and tabular data, and I want to feed that data into a mixed-input neural network. I have seen examples of how to use a multi-input neural network with numpy arrays:
model.fit([X_images, X_tabular], [Y_images, Y_tabular])
but transforming the TFRecord dataset into a numpy array by iterating over it slows down the process.
Is there a way to train directly on the TFRecord data? So far I have a model that trains only on images, and this is the code I am using:
tfrecords_schema = {
    'id': tf.io.FixedLenFeature([], tf.string),
    'y_label': tf.io.FixedLenFeature([], tf.int64),
    'image': tf.io.FixedLenFeature([], tf.string),
    'x_1': tf.io.FixedLenFeature([], tf.float32),
    'x_2': tf.io.FixedLenFeature([], tf.float32),
    'x_3': tf.io.FixedLenFeature([], tf.int64),
    'x_4': tf.io.FixedLenFeature([], tf.int64),
}

def _parse_function(example_proto):
    parsed_example = tf.io.parse_single_example(example_proto, tfrecords_schema)
    return parsed_example['image'], parsed_example['y_label']

def _parse_error(image, label):
    return image != b''

def _preprocess_function(image, label):
    image = tf.image.decode_jpeg(image)
    image = tf.image.resize(image, (299, 299))
    label = label_decoder(label)
    return image, label

train_data = (
    train_data
    .map(_parse_function)
    .filter(_parse_error)
    .map(_preprocess_function)
    .apply(tf.data.experimental.ignore_errors())
    .batch(BATCH_SIZE)
)

validation_data = (
    validation_data
    .map(_parse_function)
    .filter(_parse_error)
    .map(_preprocess_function)
    .apply(tf.data.experimental.ignore_errors())
    .batch(BATCH_SIZE)
)
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_data = train_data.prefetch(buffer_size=AUTOTUNE)
validation_data = validation_data.prefetch(buffer_size=AUTOTUNE)
def make_model(image_shape, num_classes):
    inputs = tf.keras.Input(shape=image_shape)
    x = tf.keras.applications.inception_v3.preprocess_input(inputs)
    base_model = tf.keras.applications.InceptionV3(input_shape=IMAGE_SHAPE, include_top=False, weights='imagenet')
    base_model.trainable = False
    x = base_model(x, training=False)
    global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
    x = global_average_layer(x)
    x = tf.keras.layers.Dropout(0.3)(x)
    prediction_layer = tf.keras.layers.Dense(num_classes)
    outputs = prediction_layer(x)
    return tf.keras.Model(inputs, outputs)

model = make_model(IMAGE_SHAPE, NUM_CLASSES)
learning_rate = 0.0001
model.compile(
    optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"]
)
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(filepath="tf/pvc_data/tmp/weights.hdf5", verbose=1, save_best_only=True)
]
history = model.fit(
    train_data, epochs=1, callbacks=callbacks, validation_data=validation_data
)
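One possible direction (a hedged sketch, not from the original post): have the parse function return a dict of named tensors. tf.keras routes each dict entry to the Input layer with the matching name, so the TFRecord pipeline can feed both branches directly.
# Hedged sketch: parse image and tabular features together. The names
# 'image_input' and 'tabular_input' are illustrative and must match the
# name= arguments of the corresponding tf.keras.Input layers.
def _parse_multi_input(example_proto):
    parsed = tf.io.parse_single_example(example_proto, tfrecords_schema)
    image = tf.image.decode_jpeg(parsed['image'])
    image = tf.image.resize(image, (299, 299))
    tabular = tf.stack([parsed['x_1'], parsed['x_2'],
                        tf.cast(parsed['x_3'], tf.float32),
                        tf.cast(parsed['x_4'], tf.float32)])
    return {'image_input': image, 'tabular_input': tabular}, parsed['y_label']

# The model then needs matching named inputs, roughly:
# image_in = tf.keras.Input(shape=(299, 299, 3), name='image_input')
# tabular_in = tf.keras.Input(shape=(4,), name='tabular_input')
# model = tf.keras.Model([image_in, tabular_in], outputs)
# model.fit(raw_train_data.map(_parse_multi_input).batch(BATCH_SIZE), ...)
# where raw_train_data is the unparsed TFRecordDataset.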

tf.estimator serving function failing

I am using tf.estimator to train and serve my TensorFlow model. Training completes as expected, but serving fails. I read my data in as a TFRecordDataset. My parsing function applies a transformation to feature "x2". "x2" is a string that is split; the transformed feature is "x3".
def parse_function(example_proto):
    features = {"x1": tf.FixedLenFeature((), tf.string),
                "x2": tf.FixedLenFeature((), tf.string),
                "label": tf.FixedLenFeature((), tf.int64)}
    parsed_features = tf.parse_example(example_proto, features)
    x3 = tf.string_split(parsed_features["x2"], ',')
    parsed_features["x3"] = x3
    return parsed_features, parsed_features["label"]
My serving function is:
def serving_input_fn():
    receiver_tensor = {}
    for feature_name in record_columns:
        if feature_name in {"x1", "x2", "x3"}:
            dtype = tf.string
        else:
            dtype = tf.int32
        receiver_tensor[feature_name] = tf.placeholder(dtype, shape=[None])
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in receiver_tensor.items()
    }
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensor)
It always worked in the past when I didn't have any transformations in my parsing function, but it now fails with the following error:
cloud.ml.prediction.prediction_utils.PredictionError: Failed to run the provided model: Exception during running the graph: Cannot feed value of shape (2, 1) for Tensor u'Placeholder_2:0', which has shape '(?,)' (Error code: 2)
I think I have to apply the transformation to "x2" in my serving function, but I don't know how. Any help would be greatly appreciated.
Following this link
I processed feature "x3" after creating the receiver_tensor. Splitting the string in the serving function required squeezing the tensor before splitting, because the features dict expands each placeholder to shape [None, 1] while tf.string_split expects a rank-1 tensor.
def serving_input_fn():
    receiver_tensor = {}
    receiver_tensor["x1"] = tf.placeholder(tf.string, shape=[None], name="x1")
    receiver_tensor["label"] = tf.placeholder(tf.int32, shape=[None], name="x2")
    receiver_tensor["x2"] = tf.placeholder(tf.string, shape=[None],
                                           name="string")
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in receiver_tensor.items()
    }
    features["x3"] = tf.string_split(tf.squeeze(features["x2"]), ',')
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensor)

How to create a TensorFlow serving_input_receiver_fn with multiple features?

In the TF guide on saving models there is a paragraph on serving_input_receiver_fn that talks about implementing functions for preprocessing logic. I'm trying to do some normalization of input data for a DNNRegressor. Their code for the function looks like this:
feature_spec = {'foo': tf.FixedLenFeature(...),
                'bar': tf.VarLenFeature(...)}

def serving_input_receiver_fn():
    """An input receiver that expects a serialized tf.Example."""
    serialized_tf_example = tf.placeholder(dtype=tf.string,
                                           shape=[default_batch_size],
                                           name='input_example_tensor')
    receiver_tensors = {'examples': serialized_tf_example}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
My code looks like this:
feat_cols = [
    tf.feature_column.numeric_column(key="FEATURE1"),
    tf.feature_column.numeric_column(key="FEATURE2")
]

def serving_input_receiver_fn():
    feature_spec = tf.feature_column.make_parse_example_spec(feat_cols)
    default_batch_size = 1
    serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[default_batch_size], name='tf_example')
    receiver_tensors = {'examples': serialized_tf_example}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    fn_norm1 = lambda FEATURE1: normalize_input_data('FEATURE1', FEATURE1)
    fn_norm2 = lambda FEATURE2: normalize_input_data('FEATURE2', FEATURE2)
    features['FEATURE1'] = tf.map_fn(fn_norm1, features['FEATURE1'], dtype=tf.float32)
    features['FEATURE2'] = tf.map_fn(fn_norm2, features['FEATURE2'], dtype=tf.float32)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
After all of that, the saved model has none of my features in the graph. I'm trying to figure out how this works when you have more than one feature to pass.
I created an example using the keras MPG data. It is located here:
The features argument of ServingInputReceiver is passed directly to your model function. What you want is receiver_tensors or receiver_tensors_alternatives, that is, the second and third arguments to the ServingInputReceiver constructor.
For example, you may do this:
def serving_input_receiver_fn():
    serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[default_batch_size], name='tf_example')
    receiver_tensors = {'examples': serialized_tf_example}
    raw_features = tf.parse_example(serialized_tf_example, feature_spec)
    fn_norm1 = lambda FEATURE1: normalize_input_data('FEATURE1', FEATURE1)
    fn_norm2 = lambda FEATURE2: normalize_input_data('FEATURE2', FEATURE2)
    features = {}
    features['FEATURE1'] = tf.map_fn(fn_norm1, raw_features['FEATURE1'], dtype=tf.float32)
    features['FEATURE2'] = tf.map_fn(fn_norm2, raw_features['FEATURE2'], dtype=tf.float32)
    return tf.estimator.export.ServingInputReceiver(
        features=features,
        receiver_tensors=receiver_tensors,
        receiver_tensors_alternatives={'SOME_KEY': raw_features})
If you don't ever need to feed the network with an Example proto, you can skip it entirely.
raw_features = {'FEATURE1': tf.placeholder(...), 'FEATURE2': tf.placeholder(...)}
features = preprocess(raw_features)
return tf.estimator.export.ServingInputReceiver(features, {'SOME_OTHER_KEY': raw_features})
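Putting the second approach together, a minimal runnable sketch (TF 1.x, an illustration rather than the answer's exact code); normalize_input_data is assumed to be your own normalization helper:
def serving_input_receiver_fn():
    # Raw placeholders are what callers feed at serving time.
    raw_features = {
        'FEATURE1': tf.placeholder(tf.float32, shape=[None], name='FEATURE1'),
        'FEATURE2': tf.placeholder(tf.float32, shape=[None], name='FEATURE2'),
    }
    # Normalized tensors are what the model function actually receives.
    features = {
        'FEATURE1': normalize_input_data('FEATURE1', raw_features['FEATURE1']),
        'FEATURE2': normalize_input_data('FEATURE2', raw_features['FEATURE2']),
    }
    return tf.estimator.export.ServingInputReceiver(features, raw_features)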

Unable to use core Estimator with contrib Predictor

I'm using canned estimators and am struggling with poor predict performance, so I'm trying to use tf.contrib.predictor to improve my inference performance. I've made this minimal example to reproduce my problem:
import tensorflow as tf
from tensorflow.contrib import predictor
def serving_input_fn():
    x = tf.placeholder(dtype=tf.string, shape=[1], name='x')
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

input_feature_column = tf.feature_column.numeric_column('x', shape=[1])
estimator = tf.estimator.DNNRegressor(
    feature_columns=[input_feature_column],
    hidden_units=[10, 20, 10],
    model_dir="model_dir\\predictor-test")

estimator_predictor = predictor.from_estimator(estimator, serving_input_fn)
estimator_predictor({"inputs": ["1.0"]})
This yields the following exception:
UnimplementedError (see above for traceback): Cast string to float is not supported
[[Node: dnn/input_from_feature_columns/input_layer/x/ToFloat = Cast[DstT=DT_FLOAT, SrcT=DT_STRING, _device="/job:localhost/replica:0/task:0/device:CPU:0"](dnn/input_from_feature_columns/input_layer/x/ExpandDims)]]
I've tried using tf.estimator.export.TensorServingInputReceiver instead of ServingInputReceiver in my serving_input_fn(), so that I can feed my model with a numerical tensor which is what I want:
def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[1], name='x')
    return tf.estimator.export.TensorServingInputReceiver(x, x)
but then I get the following exception in my predictor.from_estimator() call:
ValueError: features should be a dictionary of Tensors. Given type: <class 'tensorflow.python.framework.ops.Tensor'>
Any ideas?
My understanding of all of this is not really solid, but I got it working, and given the size of the community, I'll try to share what I did.
First, I'm running tensorflow 1.5 binaries with this patch applied manually.
The exact code I'm running is this:
def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[3500], name='x')
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir="{}/model_dir_{}/model.ckpt-103712".format(script_dir, 3))

estimator_predictor = tf.contrib.predictor.from_estimator(
    estimator, serving_input_fn)

p = estimator_predictor(
    {"x": np.array(sample.normalized.input_data)})
My case is a bit different from your example because I'm using a custom Estimator, but in your case I guess you should try something like this:
def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[1], name='x')
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

estimator = ...

estimator_predictor = tf.contrib.predictor.from_estimator(
    estimator, serving_input_fn)

estimator_predictor({"x": [1.0]})
The error is in the following line:
estimator_predictor({"inputs": ["1.0"]})
Please take 1.0 out of the quotes; currently it's a string.
After having worked on this for a couple of days, I want to share what I have done. The following code is also available from https://github.com/dage/tensorflow-estimator-predictor-example
TL;DR: predictor works best with custom estimators and the performance increase is massive.
import tensorflow as tf
import numpy as np
import datetime
import time
FEATURES_RANK = 3 # The number of inputs
LABELS_RANK = 2 # The number of outputs
# Returns a numpy array of rank LABELS_RANK based on the features argument.
# Can be used when creating a training dataset.
def features_to_labels(features):
    sum_column = features.sum(1).reshape(features.shape[0], 1)
    labels = np.hstack((sum_column*i for i in range(1, LABELS_RANK+1)))
    return labels

def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[None, FEATURES_RANK], name='x')  # match dtype in input_fn
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

def model_fn(features, labels, mode):
    net = features["x"]  # input
    for units in [4, 8, 4]:  # hidden units
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
        net = tf.layers.dropout(net, rate=0.1)
    output = tf.layers.dense(net, LABELS_RANK, activation=None)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=output, export_outputs={"out": tf.estimator.export.PredictOutput(output)})
    loss = tf.losses.mean_squared_error(labels, output)
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

# expecting a numpy array of shape (1, FEATURE_RANK) for constant_feature argument
def input_fn(num_samples, constant_feature=None, is_infinite=True):
    feature_values = np.full((num_samples, FEATURES_RANK), constant_feature) if isinstance(constant_feature, np.ndarray) else np.random.rand(num_samples, FEATURES_RANK)
    feature_values = np.float32(feature_values)  # match dtype in serving_input_fn
    labels = features_to_labels(feature_values)
    dataset = tf.data.Dataset.from_tensors(({"x": feature_values}, labels))
    if is_infinite:
        dataset = dataset.repeat()
    return dataset.make_one_shot_iterator().get_next()

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir="model_dir\\estimator-predictor-test-{date:%Y-%m-%d %H.%M.%S}".format(date=datetime.datetime.now()))

train = estimator.train(input_fn=lambda: input_fn(50), steps=500)
evaluate = estimator.evaluate(input_fn=lambda: input_fn(20), steps=1)
predictor = tf.contrib.predictor.from_estimator(estimator, serving_input_fn)

consistency_check_features = np.random.rand(1, FEATURES_RANK)
consistency_check_labels = features_to_labels(consistency_check_features)

num_calls_predictor = 100
predictor_input = {"x": consistency_check_features}
start_time_predictor = time.clock()
for i in range(num_calls_predictor):
    predictor_prediction = predictor(predictor_input)
delta_time_predictor = 1./num_calls_predictor*(time.clock() - start_time_predictor)

num_calls_estimator_predict = 10
estimator_input = lambda: input_fn(1, consistency_check_features, False)
start_time_estimator_predict = time.clock()
for i in range(num_calls_estimator_predict):
    estimator_prediction = list(estimator.predict(input_fn=estimator_input))
delta_time_estimator = 1./num_calls_estimator_predict*(time.clock() - start_time_estimator_predict)

print("{} --> {}\n  predictor={}\n  estimator={}.\n".format(consistency_check_features, consistency_check_labels, predictor_prediction, estimator_prediction))
print("Time used per estimator.predict() call: {:.5f}s, predictor(): {:.5f}s ==> predictor is {:.0f}x faster!".format(delta_time_estimator, delta_time_predictor, delta_time_estimator/delta_time_predictor))
On my laptop I get the following results:
[[0.55424854 0.98057611 0.98604857]] --> [[2.52087322 5.04174644]]
predictor={'output': array([[2.5221248, 5.049496 ]], dtype=float32)}
estimator=[array([2.5221248, 5.049496 ], dtype=float32)].
Time used per estimator.predict() call: 0.30071s, predictor(): 0.00057s ==> predictor is 530x faster!

make tf.Estimator use default graph

I am trying to make use of a TensorFlow protobuf feeding pipeline. The easiest way seemed to be to use tf.estimator.Estimator with tf.contrib.data.TFRecordDataset. However, I came across the issue that it creates a new Graph despite being launched within with g.as_default(). In the following code I see that both the model tensors and the tensors returned by the TFRecordDataset are the same before I feed them to the Estimator, but they become different within the Estimator. Any ideas how to put them on the same graph?
# coding: utf-8
import sys
import tensorflow as tf
from keras.applications.inception_v3 import InceptionV3
import numpy as np
final_activation='linear'
g = tf.Graph()
with g.as_default():
    model = InceptionV3(weights='imagenet',
                        include_top=True,
                        input_tensor=None,
                        input_shape=None,
                        pooling=None,
                        classes=1000)

    def model_fn(mode, features, labels, params):
        optimizer = params["optimizer"]
        opt_params = params.get("opt_params", {})
        predictions = model(features)
        if (mode == tf.estimator.ModeKeys.TRAIN or
                mode == tf.estimator.ModeKeys.EVAL):
            loss = tf.contrib.keras.backend.categorical_crossentropy(predictions, labels)
            #loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logyhat)
        else:
            loss = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = getattr(tf.train, optimizer)
            train_op = optimizer(opt_params).minimize(loss)
        else:
            train_op = None
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op)

    def parser(record):
        keys_to_features = {
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64)
        }
        features = tf.parse_single_example(
            record,
            features=keys_to_features)
        # Convert from a scalar string tensor to a uint8 tensor
        image = tf.decode_raw(features['image_raw'], tf.float32)
        height = tf.cast(features['height'], tf.int32)
        width = tf.cast(features['width'], tf.int32)
        image_shape = tf.stack([height, width, 3])
        image = tf.reshape(image, image_shape)
        label = tf.cast(features["label"], tf.int32)
        return image, label

    def get_dataset_inp_fn(filenames, epochs=20):
        def dataset_input_fn():
            dataset = tf.contrib.data.TFRecordDataset(filenames)
            # Use `Dataset.map()` to build a pair of a feature dictionary and a label
            # tensor for each example.
            dataset = dataset.map(parser)
            dataset = dataset.shuffle(buffer_size=10000)
            dataset = dataset.batch(32)
            dataset = dataset.repeat(epochs)
            iterator = dataset.make_one_shot_iterator()
            features, labels = iterator.get_next()
            return features, labels
        return dataset_input_fn

    inpfun = get_dataset_inp_fn(["mydataset.tfrecords"], epochs=20)
    x, y = inpfun()

    print("X", x.graph)
    print("DEFAULT", g)
    print("MODEL", model.input.graph)

    # everything is on the same graph
    if not x.graph is tf.get_default_graph():
        raise ValueError()

    with tf.Session(graph=g) as sess:
        est = tf.estimator.Estimator(
            model_fn,
            model_dir=None,
            config=None,
            params={"optimizer": "AdamOptimizer",
                    "opt_params": {}}
        )
        est.train(inpfun)
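For reference, a hedged sketch of the usual workaround (not from the original thread): Estimator builds and owns a fresh Graph every time train/evaluate/predict is called, so it ignores the externally created g, and any ops built outside model_fn end up in a different graph. The fix is to construct the model inside model_fn so it lands in the Estimator's graph alongside the dataset tensors; the sparse loss below is an assumption based on the integer labels the parser returns:
def model_fn(features, labels, mode, params):
    # Build the Keras model here so its ops and variables are created in the
    # graph that the Estimator owns.
    model = InceptionV3(weights='imagenet', include_top=True, classes=1000)
    predictions = model(features)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # The parser yields integer class ids, so a sparse loss is assumed here.
    loss = tf.reduce_mean(
        tf.keras.backend.sparse_categorical_crossentropy(labels, predictions))
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)
    optimizer = getattr(tf.train, params["optimizer"])(**params.get("opt_params", {}))
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

est = tf.estimator.Estimator(model_fn, params={"optimizer": "AdamOptimizer", "opt_params": {}})
est.train(inpfun)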