I deployed my retrained TensorFlow for Poets model on Google Cloud ML Engine. Now I am trying to get predictions from it, but it gives the following error:
"error": "Prediction failed: Error during model execution: AbortionError(code=StatusCode.INVALID_ARGUMENT, details=\"contents must be scalar, got shape [1]\n\t [[{{node DecodeJpeg}}]]\")"
This is the code I used to export the serving model:
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import builder as saved_model_builder
input_graph = 'retrained_graph.pb'
saved_model_dir = 'my_model'
with tf.Graph().as_default() as graph:
    # Read in the export graph
    with tf.gfile.FastGFile(input_graph, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')

    # Define SavedModel Signature (inputs and outputs)
    in_image = graph.get_tensor_by_name('DecodeJpeg/contents:0')
    inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}

    out_classes = graph.get_tensor_by_name('final_result:0')
    outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}

    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name='tensorflow/serving/predict'
    )

    with tf.Session(graph=graph) as sess:
        # Save out the SavedModel.
        b = saved_model_builder.SavedModelBuilder(saved_model_dir)
        b.add_meta_graph_and_variables(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       signature_def_map={'serving_default': signature})
        b.save()
The request.json was generated with:
python -c 'import base64, sys, json; img = base64.b64encode(open(sys.argv[1], "rb").read()); print json.dumps({"image_bytes": {"b64": img}})' test.jpg &> request.json
You can try:
{"instances": [{"image_bytes": {"b64": encoded_string}, "key": "0"}]}
Related
Recently I tried to convert a TF 1.x model to the saved_model format, following the official migration document. However, in my use case most of the models I have, as well as those in the TensorFlow model zoo, are pb files, and the official document says:
There is no straightforward way to upgrade a raw Graph.pb file to TensorFlow 2.0, but if you have a "Frozen graph" (a tf.Graph where the variables have been turned into constants), then it is possible to convert this to a concrete_function using v1.wrap_function:
But I still do not understand how to convert them to the saved_model format.
In TF1 mode:
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
def convert_pb_to_server_model(pb_model_path, export_dir, input_name='input:0', output_name='output:0'):
    graph_def = read_pb_model(pb_model_path)
    convert_pb_saved_model(graph_def, export_dir, input_name, output_name)


def read_pb_model(pb_model_path):
    with tf.gfile.GFile(pb_model_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        return graph_def


def convert_pb_saved_model(graph_def, export_dir, input_name='input:0', output_name='output:0'):
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    sigs = {}
    with tf.Session(graph=tf.Graph()) as sess:
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()
        inp = g.get_tensor_by_name(input_name)
        out = g.get_tensor_by_name(output_name)

        sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
            tf.saved_model.signature_def_utils.predict_signature_def(
                {"input": inp}, {"output": out})

        builder.add_meta_graph_and_variables(sess,
                                             [tag_constants.SERVING],
                                             signature_def_map=sigs)
        builder.save()
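A usage sketch (the pb path, export directory, and tensor names below are placeholders; substitute the actual input and output tensor names from your own graph):

# Hypothetical paths and tensor names, for illustration only.
convert_pb_to_server_model('frozen_model.pb', './export_dir',
                           input_name='input:0',
                           output_name='output:0')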
In TF2 mode:
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph
from tensorflow.lite.python.util import run_graph_optimizations, get_grappler_config
import numpy as np
def frozen_keras_graph(func_model):
    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(func_model)

    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(["constfold", "function"]),
        graph=frozen_func.graph)

    return graph_def


def convert_keras_model_to_pb():
    keras_model = train_model()
    func_model = tf.function(keras_model).get_concrete_function(
        tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
    graph_def = frozen_keras_graph(func_model)
    tf.io.write_graph(graph_def, '/tmp/tf_model3', 'frozen_graph.pb')


def convert_saved_model_to_pb():
    model_dir = '/tmp/saved_model'
    model = tf.saved_model.load(model_dir)
    func_model = model.signatures["serving_default"]
    graph_def = frozen_keras_graph(func_model)
    tf.io.write_graph(graph_def, '/tmp/tf_model3', 'frozen_graph.pb')
Or:
def convert_saved_model_to_pb(output_node_names, input_saved_model_dir, output_graph_dir):
    from tensorflow.python.tools import freeze_graph

    output_node_names = ','.join(output_node_names)

    freeze_graph.freeze_graph(input_graph=None, input_saver=None,
                              input_binary=None,
                              input_checkpoint=None,
                              output_node_names=output_node_names,
                              restore_op_name=None,
                              filename_tensor_name=None,
                              output_graph=output_graph_dir,
                              clear_devices=None,
                              initializer_nodes=None,
                              input_saved_model_dir=input_saved_model_dir)


def save_output_tensor_to_pb():
    output_names = ['StatefulPartitionedCall']
    save_pb_model_path = '/tmp/pb_model/freeze_graph.pb'
    model_dir = '/tmp/saved_model'
    convert_saved_model_to_pb(output_names, model_dir, save_pb_model_path)
To check whether my understanding is correct, I also post what I have learnt:
If anyone wants to migrate from tf1.x to tf2.x, please follow the official guide first.
In TensorFlow 2.0, tf.train.Saver and freeze_graph have been replaced by saved_model.
And if anyone wants to convert a pb model from tf1.x into a saved_model, you can follow Boluoyu's answer above. But if your runtime environment is above tf2.0, you can use the following code:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants


def convert_pb_to_server_model(pb_model_path, export_dir, input_name='input:0', output_name='output:0'):
    graph_def = read_pb_model(pb_model_path)
    convert_pb_saved_model(graph_def, export_dir, input_name, output_name)


def read_pb_model(pb_model_path):
    with tf.gfile.GFile(pb_model_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        return graph_def


def convert_pb_saved_model(graph_def, export_dir, input_name='input:0', output_name='output:0'):
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    sigs = {}
    with tf.Session(graph=tf.Graph()) as sess:
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()
        # Tensor names must include the output index (e.g. 'input:0'), not just the op name.
        inp = g.get_tensor_by_name(input_name)
        out = g.get_tensor_by_name(output_name)

        sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
            tf.saved_model.signature_def_utils.predict_signature_def(
                {"input": inp}, {"output": out})

        builder.add_meta_graph_and_variables(sess,
                                             [tag_constants.SERVING],
                                             signature_def_map=sigs)
        builder.save()
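For completeness, here is a rough TF2-native sketch of the v1.wrap_function route quoted from the migration guide in the question. The frozen_graph.pb path, the './saved_model_from_pb' export directory, and the 'input:0'/'output:0' tensor names are placeholder assumptions:

import tensorflow as tf


def wrap_frozen_graph(graph_def, inputs, outputs):
    # Import the frozen GraphDef inside a wrapped tf.function, then prune it
    # down to a concrete function from the named inputs to the named outputs.
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")

    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    return wrapped_import.prune(
        tf.nest.map_structure(import_graph.as_graph_element, inputs),
        tf.nest.map_structure(import_graph.as_graph_element, outputs))


graph_def = tf.compat.v1.GraphDef()
with open('frozen_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

concrete_func = wrap_frozen_graph(graph_def, inputs='input:0', outputs='output:0')

# Attach the concrete function to a trackable object and export it as a SavedModel.
module = tf.Module()
module.serve = concrete_func
tf.saved_model.save(module, './saved_model_from_pb', signatures=concrete_func)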
We have trained a model using Microsoft's Custom Vision. When we try to convert the .pb to .tflite, we run into errors. The Python code we use is the following:
import tensorflow as tf
from tensorflow import data
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python import ops
from tensorflow.tools.graph_transforms import TransformGraph
graph_def_file = 'model.pb'
input_arrays = ['Placeholder']
output_arrays = ['model_outputs']
transforms = [
    'remove_nodes(op=Identity)',
    'merge_duplicate_nodes',
    'strip_unused_nodes',
    'fold_constants(ignore_errors=true)',
    'fold_batch_norms'
]


def get_graph_def_from_file(graph_filepath):
    with ops.Graph().as_default():
        with tf.gfile.GFile(graph_filepath, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            return graph_def


graph_def = get_graph_def_from_file(graph_def_file)
optimized_graph_def = TransformGraph(graph_def, input_arrays, output_arrays, transforms)
tf.train.write_graph(optimized_graph_def, logdir='', as_text=False, name='optimized_model.pb')

optimized_graph_def_file = 'optimized_model.pb'
converter = tf.lite.TFLiteConverter.from_frozen_graph(optimized_graph_def_file,
                                                      input_arrays,
                                                      output_arrays)
converter.target_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,  # We also tried with TF_OPS only and TFLITE_BUILTINS only
                        tf.lite.OpsSet.SELECT_TF_OPS]
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
open('optimized_converted_model_2.tflite', 'wb').write(tflite_model)
This is the input of the network, and it does a convolution fairly early. As you can see from the error, TensorFlow does not appear to recognize this operation.
I have successfully trained, exported and uploaded my 'retrained_graph.pb' to ML Engine. My export script is as follows:
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import builder as saved_model_builder
input_graph = 'retrained_graph.pb'
saved_model_dir = 'my_model'
with tf.Graph().as_default() as graph:
    # Read in the export graph
    with tf.gfile.FastGFile(input_graph, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')

    # Define SavedModel Signature (inputs and outputs)
    in_image = graph.get_tensor_by_name('DecodeJpeg/contents:0')
    inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}

    out_classes = graph.get_tensor_by_name('final_result:0')
    outputs = {'prediction_bytes': tf.saved_model.utils.build_tensor_info(out_classes)}

    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name='tensorflow/serving/predict'
    )

    with tf.Session(graph=graph) as sess:
        # Save out the SavedModel.
        b = saved_model_builder.SavedModelBuilder(saved_model_dir)
        b.add_meta_graph_and_variables(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       signature_def_map={'serving_default': signature})
        b.save()
I build my prediction JSON using the following:
# Copy the image to local disk.
gsutil cp gs://cloud-ml-data/img/flower_photos/tulips/4520577328_a94c11e806_n.jpg flower.jpg
# Create request message in json format.
python -c 'import base64, sys, json; img = base64.b64encode(open(sys.argv[1], "rb").read()); print json.dumps({"image_bytes": {"b64": img}}) ' flower.jpg &> request.json
# Call prediction service API to get classifications
gcloud ml-engine predict --model ${MODEL_NAME} --json-instances request.json
However this fails with the response:
{
  "error": "Prediction failed: Error during model execution: AbortionError(code=StatusCode.INVALID_ARGUMENT, details=\"contents must be scalar, got shape [1]\n\t [[Node: DecodeJpeg = DecodeJpeg[_output_shapes=[[?,?,3]], acceptable_fraction=1, channels=3, dct_method=\"\", fancy_upscaling=true, ratio=1, try_recover_truncated=false, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](_arg_DecodeJpeg/contents_0_0)]]\")"
}
Any help appreciated, I'm so close I can taste it :D
Why do you have these lines?
in_image = graph.get_tensor_by_name('DecodeJpeg/contents:0')
inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}
The shape here is scalar. Can you make sure you create an input with shape [None], as in:
https://github.com/GoogleCloudPlatform/cloudml-samples/blob/master/flowers/trainer/model.py#L364
The server will decode and batch all the inputs, so the input to your graph is essentially [base64_decode("xxx")], whereas you actually want to feed base64_decode("xxx"), since the op takes a string-typed scalar tensor. The server side assumes the input shape is [None, ...], i.e. the first dimension can be anything, for batching; in your case, [None]. You might want to create a tensor of that shape and then feed it into the op.
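A minimal sketch of that idea, using the tensor names from the question's retrained_graph.pb: a new [None]-shaped string placeholder becomes the serving input and is wired into the original scalar DecodeJpeg/contents:0 via input_map. The squeeze means this sketch only handles one image per request; true batching would need per-image preprocessing (e.g. with tf.map_fn), as in the linked flowers sample. The export directory name is a placeholder.

import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder

graph_def = tf.GraphDef()
with tf.gfile.FastGFile('retrained_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    # Served input: a batch of JPEG byte strings, shape [None].
    image_bytes = tf.placeholder(tf.string, shape=[None], name='image_bytes')

    # The original DecodeJpeg op expects a scalar string, so drop the batch
    # dimension (this assumes exactly one image per request).
    single_image = tf.squeeze(image_bytes, axis=[0])

    # Re-import the retrained graph, feeding our tensor in place of the
    # original scalar input, and pull out the classification output.
    output, = tf.import_graph_def(
        graph_def,
        input_map={'DecodeJpeg/contents:0': single_image},
        return_elements=['final_result:0'],
        name='')

    inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(image_bytes)}
    outputs = {'prediction': tf.saved_model.utils.build_tensor_info(output)}
    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name='tensorflow/serving/predict')

    with tf.Session(graph=graph) as sess:
        # Hypothetical export directory.
        b = saved_model_builder.SavedModelBuilder('my_model_batched_input')
        b.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={'serving_default': signature})
        b.save()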
I am trying to convert a Keras model to use it for predictions on Google Cloud's ML Engine. I have a pre-trained classifier that takes a NumPy array as input. The normal working data I send to model.predict is named input_data.
I convert it to base 64 and dump it to a json file using the following few lines:
data = {}
data['image_bytes'] = [{'b64': base64.b64encode(input_data.tostring())}]

with open('weights/keras/example.json', 'w') as outfile:
    json.dump(data, outfile)
Now, I try to create the TF model from my existing model:
from keras.models import model_from_json
import tensorflow as tf
from keras import backend as K
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
init = tf.global_variables_initializer()
with tf.Session() as sess:
    K.set_session(sess)
    sess.run(init)

    print("Keras model & weights loading...")
    K.set_learning_phase(0)
    with open(json_file_path, 'r') as file_handle:
        model = model_from_json(file_handle.read())
    model.load_weights(weight_file_path)

    builder = saved_model_builder.SavedModelBuilder(export_path)

    raw_byte_strings = tf.placeholder(dtype=tf.string, shape=[None], name='source')
    decode = lambda raw_byte_str: tf.decode_raw(raw_byte_str, tf.float32)
    input_images = tf.map_fn(decode, raw_byte_strings)
    print(input_images)

    signature = predict_signature_def(inputs={'image_bytes': input_images},
                                      outputs={'output': model.output})

    builder.add_meta_graph_and_variables(
        sess=sess,
        tags=[tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
        }
    )
    builder.save()
When I try to test this locally I get the following error:
ERROR:root:Exception during running the graph: You must feed a value for placeholder tensor 'input_1' with dtype float and shape [?,473,473,3]
[[Node: input_1 = Placeholder[dtype=DT_FLOAT, shape=[?,473,473,3], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Help?
I've been following the TensorFlow for Poets 2 codelab on a model I've trained, and have created a frozen, quantized graph with embedded weights. It's captured in a single file - say my_quant_graph.pb.
Since I can use that graph for inference with the TensorFlow Android inference library just fine, I thought I could do the same with Cloud ML Engine, but it seems it only works on a SavedModel model.
How can I simply convert a frozen/quantized graph in a single pb file to use on ML engine?
It turns out that a SavedModel provides some extra information around a saved graph. Assuming a frozen graph doesn't need assets, it only needs a serving signature specified.
Here's the python code I ran to convert my graph to a format that Cloud ML engine accepted. Note I only have a single pair of input/output tensors.
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
export_dir = './saved'
graph_pb = 'my_quant_graph.pb'
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.gfile.GFile(graph_pb, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}

with tf.Session(graph=tf.Graph()) as sess:
    # name="" is important to ensure we don't get spurious prefixing
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    inp = g.get_tensor_by_name("real_A_and_B_images:0")
    out = g.get_tensor_by_name("generator/Tanh:0")

    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"in": inp}, {"out": out})

    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)
    builder.save()
Here is a sample with multiple output nodes:
# Convert a ProtoBuf model to saved_model format for TF Serving
# https://cloud.google.com/ai-platform/prediction/docs/exporting-savedmodel-for-prediction
import shutil
import tensorflow.compat.v1 as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
export_dir = './1'  # TF Serving can run different versions of the same model, so we put the current model in the '1' folder.
graph_pb = 'frozen_inference_graph.pb'
# Clear out folder
shutil.rmtree(export_dir, ignore_errors=True)
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.io.gfile.GFile(graph_pb, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}

with tf.Session(graph=tf.Graph()) as sess:
    # Prepare input and outputs of model
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    image_tensor = g.get_tensor_by_name("image_tensor:0")
    num_detections = g.get_tensor_by_name("num_detections:0")
    detection_scores = g.get_tensor_by_name("detection_scores:0")
    detection_boxes = g.get_tensor_by_name("detection_boxes:0")
    detection_classes = g.get_tensor_by_name("detection_classes:0")

    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"input_image": image_tensor},
            {"num_detections": num_detections,
             "detection_scores": detection_scores,
             "detection_boxes": detection_boxes,
             "detection_classes": detection_classes})

    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)
    builder.save()
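Once the export finishes, a quick sanity check is to reload the SavedModel and run a dummy frame through the signature. This is a sketch, assuming the './1' export directory above and a TF 2.x runtime for loading; the 300x300 size is just a placeholder, since image_tensor in detection graphs usually has a dynamic spatial shape:

import numpy as np
import tensorflow as tf

# Load the exported SavedModel and grab the default serving signature.
loaded = tf.saved_model.load('./1')
infer = loaded.signatures['serving_default']

# The detection graph above takes a batch of uint8 images; feed one dummy frame.
dummy = np.zeros((1, 300, 300, 3), dtype=np.uint8)
result = infer(input_image=tf.constant(dummy))

print(result['num_detections'])
print(result['detection_classes'])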