Keras model to TensorFlow to input b64-encoded data instead of numpy for ml-engine predict - tensorflow

I am trying to convert a Keras model so I can use it for predictions on Google Cloud's ml-engine. I have a pre-trained classifier that takes a numpy array as input. The data I normally send to model.predict is named input_data.
I convert it to base64 and dump it to a JSON file with the following lines:
import base64
import json

data = {}
# b64encode returns bytes; decode to str so json.dump can serialize it
data['image_bytes'] = [{'b64': base64.b64encode(input_data.tostring()).decode('utf-8')}]
with open('weights/keras/example.json', 'w') as outfile:
    json.dump(data, outfile)
Now, I try to create the TF model from my existing model:
from keras.models import model_from_json
import tensorflow as tf
from keras import backend as K
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def

init = tf.global_variables_initializer()
with tf.Session() as sess:
    K.set_session(sess)
    sess.run(init)
    print("Keras model & weights loading...")
    K.set_learning_phase(0)
    with open(json_file_path, 'r') as file_handle:
        model = model_from_json(file_handle.read())
    model.load_weights(weight_file_path)
    builder = saved_model_builder.SavedModelBuilder(export_path)

    raw_byte_strings = tf.placeholder(dtype=tf.string, shape=[None], name='source')
    decode = lambda raw_byte_str: tf.decode_raw(raw_byte_str, tf.float32)
    input_images = tf.map_fn(decode, raw_byte_strings)
    print(input_images)

    signature = predict_signature_def(inputs={'image_bytes': input_images},
                                      outputs={'output': model.output})

    builder.add_meta_graph_and_variables(
        sess=sess,
        tags=[tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
        }
    )
    builder.save()
When I try to test this locally I get the following error:
ERROR:root:Exception during running the graph: You must feed a value for placeholder tensor 'input_1' with dtype float and shape [?,473,473,3]
[[Node: input_1 = Placeholder[dtype=DT_FLOAT, shape=[?,473,473,3], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Help?
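The error suggests the model's original input_1 placeholder is still the only way into the network, while the served signature points at a tensor that is not connected to it. A minimal sketch of one way to wire things together (an assumption, not a confirmed fix from the original thread) is to call the loaded Keras model on the decoded tensor and expose the string placeholder as the signature input; the 473x473x3 shape is taken from the error message above:
# Sketch only: wire the decoded bytes into the Keras model so the served graph path
# starts at the string placeholder instead of the standalone 'input_1' Input layer.
raw_byte_strings = tf.placeholder(dtype=tf.string, shape=[None], name='source')
decode = lambda raw_bytes: tf.reshape(tf.decode_raw(raw_bytes, tf.float32), [473, 473, 3])
input_images = tf.map_fn(decode, raw_byte_strings, dtype=tf.float32)

# Calling the model on the new tensor reuses its weights but consumes input_images.
output = model(input_images)

signature = predict_signature_def(inputs={'image_bytes': raw_byte_strings},
                                  outputs={'output': output})
With the graph wired this way, the 'image_bytes' input receives the raw float32 bytes of one 473x473x3 image per instance, matching the JSON produced earlier.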

Related

Missing ops for a model created in Microsoft Custom Vision and exported to tflite

We have trained a model using Microsoft's Custom Vision. When we try to convert the .pb to .tflite, we run into errors. This is the Python code we use:
import tensorflow as tf
from tensorflow import data
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python import ops
from tensorflow.tools.graph_transforms import TransformGraph

graph_def_file = 'model.pb'
input_arrays = ['Placeholder']
output_arrays = ['model_outputs']

transforms = [
    'remove_nodes(op=Identity)',
    'merge_duplicate_nodes',
    'strip_unused_nodes',
    'fold_constants(ignore_errors=true)',
    'fold_batch_norms'
]

def get_graph_def_from_file(graph_filepath):
    with ops.Graph().as_default():
        with tf.gfile.GFile(graph_filepath, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            return graph_def

graph_def = get_graph_def_from_file(graph_def_file)
optimized_graph_def = TransformGraph(graph_def, input_arrays, output_arrays, transforms)
tf.train.write_graph(optimized_graph_def, logdir='', as_text=False, name='optimized_model.pb')

optimized_graph_def_file = 'optimized_model.pb'
converter = tf.lite.TFLiteConverter.from_frozen_graph(optimized_graph_def_file,
                                                      input_arrays,
                                                      output_arrays)
converter.target_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,  # We also tried with TF_OPS only and TFLITE_BUILTINS only
                        tf.lite.OpsSet.SELECT_TF_OPS]
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
open('optimized_converted_model_2.tflite', 'wb').write(tflite_model)
This is the input of the network, and it does a convolution fairly early. It appears TensorFlow does not recognize this operation.
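One way to see which op types are actually present (a diagnostic sketch, not part of the original post; model.pb is the frozen graph from the code above) is to print them from the GraphDef and compare against the converter's error:
# Diagnostic sketch: list the set of op types in the frozen graph so the
# operation rejected by the TFLite converter can be identified.
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('model.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

print(sorted({node.op for node in graph_def.node}))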

Error converting keras model to tfjs: duplicate weight name Variable

Following the tutorial at https://www.tensorflow.org/tutorials/images/hub_with_keras resulted in a file model.h5. Converting it to tensorflow-js with the command
tensorflowjs_converter --input_format keras ./model.h5 /tmp/jsmodel/
failed with
Exception: Error dumping weights, duplicate weight name Variable
Why is this and how can it be fixed?
MCVE
from __future__ import absolute_import, division, print_function

import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
import numpy as np

data_root = tf.keras.utils.get_file(
    'flower_photos', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    untar=True)

image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
IMAGE_SHAPE = (224, 224)
image_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SHAPE)

feature_extractor_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2"  #@param {type:"string"}
feature_extractor_layer = hub.KerasLayer(feature_extractor_url,
                                         input_shape=(224, 224, 3))

for image_batch, label_batch in image_data:
    print("Image batch shape: ", image_batch.shape)
    print("Label batch shape: ", label_batch.shape)
    break

feature_extractor_layer.trainable = False

model = tf.keras.Sequential([
    feature_extractor_layer,
    layers.Dense(image_data.num_classes, activation='softmax')
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss='categorical_crossentropy',
    metrics=['acc'])

steps_per_epoch = np.ceil(image_data.samples / image_data.batch_size)
history = model.fit(image_data, epochs=2,
                    steps_per_epoch=steps_per_epoch)  # removed callback

model.save("/tmp/so_model.h5")
This fails with a
RuntimeError: Unable to create link (name already exists)
but the model is created. Running tensorflowjs_converter --input_format keras /tmp/so_model.h5 /tmp/jsmodel on it then fails with the above
Exception: Error dumping weights, duplicate weight name Variable
UPDATE: see also Retrain image detection with MobileNet
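A quick way to see what weight names end up in the saved file (a diagnostic sketch, not part of the original question; the h5py dependency and the /tmp/so_model.h5 path are assumptions) is to walk the HDF5 file and print every stored dataset:
# Diagnostic sketch: list the weight names stored in the saved Keras h5 file,
# to see which entries collide under the bare name 'Variable'.
import h5py

def print_datasets(name, obj):
    # visititems calls this for every group and dataset; only datasets hold weights
    if isinstance(obj, h5py.Dataset):
        print(name)

with h5py.File('/tmp/so_model.h5', 'r') as f:
    f.visititems(print_datasets)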

Get predictions from deployed TF for Poets model in Google Cloud

I deployed my retrained TF for Poets model on Google Cloud. Currently, I am trying to get predictions from it, but it gives the following error.
"error": "Prediction failed: Error during model execution: AbortionError(code=StatusCode.INVALID_ARGUMENT, details=\"contents must be scalar, got shape [1]\n\t [[{{node DecodeJpeg}}]]\")"
The following code is what I used to build the serving model:
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import builder as saved_model_builder

input_graph = 'retrained_graph.pb'
saved_model_dir = 'my_model'

with tf.Graph().as_default() as graph:
    # Read in the export graph
    with tf.gfile.FastGFile(input_graph, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')

    # Define SavedModel Signature (inputs and outputs)
    in_image = graph.get_tensor_by_name('DecodeJpeg/contents:0')
    inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}

    out_classes = graph.get_tensor_by_name('final_result:0')
    outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}

    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name='tensorflow/serving/predict'
    )

    with tf.Session(graph=graph) as sess:
        # Save out the SavedModel.
        b = saved_model_builder.SavedModelBuilder(saved_model_dir)
        b.add_meta_graph_and_variables(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       signature_def_map={'serving_default': signature})
        b.save()
Code for generating request.json:
python -c 'import base64, sys, json; img = base64.b64encode(open(sys.argv[1], "rb").read()); print json.dumps({"image_bytes": {"b64": img}})' test.jpg &> request.json
You can try:
{"instances": [{"image_bytes": {"b64": encoded_string}, "key": "0"}]}

Keras model to TensorFlow Serving model

I get an error when querying the served model:
{ "error": "Generic conv implementation does not support grouped convolutions for now.\n\t [[{{node model_1/conv2d_1/Conv2D}}]]" }
This is how I convert the model for TF Serving:
import tensorflow as tf

tf.keras.backend.set_learning_phase(0)
model = tf.keras.models.load_model(r'model.h5')
export_path = 'my_image_classifier/1'

with tf.keras.backend.get_session() as sess:
    tf.saved_model.simple_save(
        sess,
        export_path,
        inputs={'input_image': model.input},
        outputs={t.name: t for t in model.outputs})
What do I do? I need the model running on the server.
System: Ubuntu 18.04
TF Serving 1.12 (Docker)
Keras 1.2.4
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
from tensorflow.keras import backend as K
import tensorflow as tf

export_path = 'model'

sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(0)

json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()

model = tf.keras.models.model_from_json(loaded_model_json)
model.load_weights('01.h5')
model.summary()

builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'input_image': model.get_layer(name='the_input').input},
                                  outputs={'out': model.get_layer(name='the_output').output})

with K.get_session() as sess:
    builder.add_meta_graph_and_variables(sess=sess,
                                         tags=[tag_constants.SERVING],
                                         signature_def_map={'predict': signature},
                                         strip_default_attrs=True)
    builder.save()
The main thing is to include
strip_default_attrs=True
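As a sanity check (a sketch under assumptions, not part of the original answer; 'model' is the export_path used above), the exported SavedModel can be reloaded and its signatures printed before handing it to TF Serving:
# Sketch: reload the exported SavedModel and print its signature defs to confirm the
# 'predict' signature and its input/output keys were written as expected.
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, [tag_constants.SERVING], 'model')
    for name, sig in meta_graph.signature_def.items():
        print(name, list(sig.inputs.keys()), list(sig.outputs.keys()))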

Convert a graph proto (pb/pbtxt) to a SavedModel for use in TensorFlow Serving or Cloud ML Engine

I've been following the TensorFlow for Poets 2 codelab on a model I've trained, and have created a frozen, quantized graph with embedded weights. It's captured in a single file - say my_quant_graph.pb.
Since I can use that graph for inference with the TensorFlow Android inference library just fine, I thought I could do the same with Cloud ML Engine, but it seems it only works on a SavedModel model.
How can I simply convert a frozen/quantized graph in a single pb file to use on ML engine?
It turns out that a SavedModel provides some extra info around a saved graph. Assuming the frozen graph doesn't need assets, it only needs a serving signature specified.
Here's the Python code I ran to convert my graph to a format that Cloud ML Engine accepted. Note that I only have a single pair of input/output tensors.
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

export_dir = './saved'
graph_pb = 'my_quant_graph.pb'

builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

with tf.gfile.GFile(graph_pb, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}

with tf.Session(graph=tf.Graph()) as sess:
    # name="" is important to ensure we don't get spurious prefixing
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    inp = g.get_tensor_by_name("real_A_and_B_images:0")
    out = g.get_tensor_by_name("generator/Tanh:0")

    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"in": inp}, {"out": out})

    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)

builder.save()
Here is a sample with multiple output nodes:
# Convert ProtoBuf model to saved_model, the format for TF Serving
# https://cloud.google.com/ai-platform/prediction/docs/exporting-savedmodel-for-prediction
import shutil

import tensorflow.compat.v1 as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

export_dir = './1'  # TF Serving supports running different versions of the same model, so we put the current model in the '1' folder.
graph_pb = 'frozen_inference_graph.pb'

# Clear out folder
shutil.rmtree(export_dir, ignore_errors=True)

builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

with tf.io.gfile.GFile(graph_pb, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}

with tf.Session(graph=tf.Graph()) as sess:
    # Prepare input and outputs of model
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    image_tensor = g.get_tensor_by_name("image_tensor:0")
    num_detections = g.get_tensor_by_name("num_detections:0")
    detection_scores = g.get_tensor_by_name("detection_scores:0")
    detection_boxes = g.get_tensor_by_name("detection_boxes:0")
    detection_classes = g.get_tensor_by_name("detection_classes:0")

    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"input_image": image_tensor},
            {"num_detections": num_detections,
             "detection_scores": detection_scores,
             "detection_boxes": detection_boxes,
             "detection_classes": detection_classes})

    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)

builder.save()
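A local smoke test of the exported detection model could look like this (a sketch under assumptions, not part of the original answer; the 300x300 input size is made up for illustration):
# Sketch: reload the SavedModel written to './1' and run the four detection outputs
# on a dummy uint8 image to check the signature wiring before deploying it.
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.saved_model import tag_constants

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tag_constants.SERVING], './1')
    g = sess.graph
    dummy_image = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # assumed input size
    fetches = {name: g.get_tensor_by_name(name + ':0')
               for name in ['num_detections', 'detection_scores',
                            'detection_boxes', 'detection_classes']}
    print(sess.run(fetches, feed_dict={g.get_tensor_by_name('image_tensor:0'): dummy_image}))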