Convert frozen model (.pb) to SavedModel - TensorFlow

Recently I tried to convert a TF 1.x model to the SavedModel format, following the official migration document. However, in my use case most of the models I have on hand, or from the TensorFlow model zoo, are .pb files, and the official document says:
There is no straightforward way to upgrade a raw Graph.pb file to TensorFlow 2.0, but if you have a "Frozen graph" (a tf.Graph where the variables have been turned into constants), then it is possible to convert this to a concrete_function using v1.wrap_function:
But I still do not understand how to convert it to the SavedModel format.
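For reference, my understanding of the v1.wrap_function route the guide describes is roughly the following sketch. The tensor names 'input:0' and 'output:0' and the file paths are placeholders for whatever your graph actually uses, and the final save call is my own guess at how the concrete function would be exported:

import tensorflow as tf

def wrap_frozen_graph(graph_def, inputs, outputs):
    # Helper from the TF2 migration guide: wrap an imported GraphDef in a ConcreteFunction
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")
    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    return wrapped_import.prune(
        tf.nest.map_structure(import_graph.as_graph_element, inputs),
        tf.nest.map_structure(import_graph.as_graph_element, outputs))

graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile('frozen_graph.pb', 'rb') as f:  # placeholder path
    graph_def.ParseFromString(f.read())

concrete_func = wrap_frozen_graph(graph_def, inputs='input:0', outputs='output:0')

# A ConcreteFunction can be exported as the serving signature of an (otherwise empty) tf.Module
tf.saved_model.save(tf.Module(), './saved_model_dir', signatures=concrete_func)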

In TF1 mode:

import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

def convert_pb_to_server_model(pb_model_path, export_dir, input_name='input:0', output_name='output:0'):
    graph_def = read_pb_model(pb_model_path)
    convert_pb_saved_model(graph_def, export_dir, input_name, output_name)

def read_pb_model(pb_model_path):
    with tf.gfile.GFile(pb_model_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def

def convert_pb_saved_model(graph_def, export_dir, input_name='input:0', output_name='output:0'):
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    sigs = {}
    with tf.Session(graph=tf.Graph()) as sess:
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()
        inp = g.get_tensor_by_name(input_name)
        out = g.get_tensor_by_name(output_name)
        sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
            tf.saved_model.signature_def_utils.predict_signature_def(
                {"input": inp}, {"output": out})
        builder.add_meta_graph_and_variables(sess,
                                             [tag_constants.SERVING],
                                             signature_def_map=sigs)
        builder.save()
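A minimal call then looks like this (the path and tensor names are placeholders; use the ones from your own graph):

convert_pb_to_server_model('frozen_graph.pb', './export_dir',
                           input_name='input:0', output_name='output:0')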
In TF2 mode:

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph
from tensorflow.lite.python.util import run_graph_optimizations, get_grappler_config
import numpy as np

def frozen_keras_graph(func_model):
    # Freeze variables into constants and run grappler optimizations on the resulting GraphDef
    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(func_model)
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.resource
    ]
    output_tensors = frozen_func.outputs
    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(["constfold", "function"]),
        graph=frozen_func.graph)
    return graph_def

def convert_keras_model_to_pb():
    keras_model = train_model()  # train_model() stands in for whatever builds and trains your Keras model
    func_model = tf.function(keras_model).get_concrete_function(
        tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
    graph_def = frozen_keras_graph(func_model)
    tf.io.write_graph(graph_def, '/tmp/tf_model3', 'frozen_graph.pb')

def convert_saved_model_to_pb():
    model_dir = '/tmp/saved_model'
    model = tf.saved_model.load(model_dir)
    func_model = model.signatures["serving_default"]
    graph_def = frozen_keras_graph(func_model)
    tf.io.write_graph(graph_def, '/tmp/tf_model3', 'frozen_graph.pb')
Or:

def convert_saved_model_to_pb(output_node_names, input_saved_model_dir, output_graph_dir):
    from tensorflow.python.tools import freeze_graph
    output_node_names = ','.join(output_node_names)
    freeze_graph.freeze_graph(input_graph=None, input_saver=None,
                              input_binary=None,
                              input_checkpoint=None,
                              output_node_names=output_node_names,
                              restore_op_name=None,
                              filename_tensor_name=None,
                              output_graph=output_graph_dir,
                              clear_devices=None,
                              initializer_nodes=None,
                              input_saved_model_dir=input_saved_model_dir)

def save_output_tensor_to_pb():
    output_names = ['StatefulPartitionedCall']
    save_pb_model_path = '/tmp/pb_model/freeze_graph.pb'
    model_dir = '/tmp/saved_model'
    convert_saved_model_to_pb(output_names, model_dir, save_pb_model_path)
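To sanity-check the frozen graph written above, it can be read back and the last few node names printed (a small sketch using the path from save_output_tensor_to_pb):

import tensorflow as tf

graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile('/tmp/pb_model/freeze_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
# output nodes usually appear near the end of the GraphDef
print([node.name for node in graph_def.node][-5:])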

To make sure my understanding is correct, I am also posting what I learnt:
If anyone wants to migrate from tf1.x to tf2.x, please follow the official post first.
In TensorFlow 2.0, tf.train.Saver and freeze_graph have been replaced by SavedModel.
If anyone wants to convert a pb model from tf1.x into a SavedModel, you can follow @Boluoyu's answer. But if your runtime environment is tf2.0 or above, you can use the following code:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

def convert_pb_to_server_model(pb_model_path, export_dir, input_name='input:0', output_name='output:0'):
    graph_def = read_pb_model(pb_model_path)
    convert_pb_saved_model(graph_def, export_dir, input_name, output_name)

def read_pb_model(pb_model_path):
    with tf.gfile.GFile(pb_model_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def

def convert_pb_saved_model(graph_def, export_dir, input_name='input:0', output_name='output:0'):
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    sigs = {}
    with tf.Session(graph=tf.Graph()) as sess:
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()
        # tensor names must include the output index, e.g. 'input:0'
        inp = g.get_tensor_by_name(input_name)
        out = g.get_tensor_by_name(output_name)
        sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
            tf.saved_model.signature_def_utils.predict_signature_def(
                {"input": inp}, {"output": out})
        builder.add_meta_graph_and_variables(sess,
                                             [tag_constants.SERVING],
                                             signature_def_map=sigs)
        builder.save()
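After exporting, a quick way to confirm the SavedModel is well-formed is to load it back with the v1 loader and list its signatures (a sketch; export_dir is whatever directory was passed to convert_pb_to_server_model):

export_dir = './export_dir'  # assumed export location
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, [tag_constants.SERVING], export_dir)
    print(list(meta_graph.signature_def.keys()))  # expected: ['serving_default']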

Related

Missing ops for a model created in Microsoft Custom Vision and exported to tflite

We have trained a model using Microsoft's Custom Vision. When we try to convert the .pb to .tflite, we run into errors. This is the Python code we use:
import tensorflow as tf
from tensorflow import data
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python import ops
from tensorflow.tools.graph_transforms import TransformGraph

graph_def_file = 'model.pb'
input_arrays = ['Placeholder']
output_arrays = ['model_outputs']
transforms = [
    'remove_nodes(op=Identity)',
    'merge_duplicate_nodes',
    'strip_unused_nodes',
    'fold_constants(ignore_errors=true)',
    'fold_batch_norms'
]

def get_graph_def_from_file(graph_filepath):
    with ops.Graph().as_default():
        with tf.gfile.GFile(graph_filepath, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            return graph_def

graph_def = get_graph_def_from_file(graph_def_file)
optimized_graph_def = TransformGraph(graph_def, input_arrays, output_arrays, transforms)
tf.train.write_graph(optimized_graph_def, logdir='', as_text=False, name='optimized_model.pb')

optimized_graph_def_file = 'optimized_model.pb'
converter = tf.lite.TFLiteConverter.from_frozen_graph(optimized_graph_def_file,
                                                      input_arrays,
                                                      output_arrays)
converter.target_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,  # We also tried with TF_OPS only and TFLITE_BUILTINS only
                        tf.lite.OpsSet.SELECT_TF_OPS]
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
open('optimized_converted_model_2.tflite', 'wb').write(tflite_model)
This is the input of the network, and it does a convolution fairly early.
As you can see, it appears TensorFlow is not recognizing this operation.

Get predictions from deployed TF for poet model in google cloud

I deployed my retrained TF for Poets model on Google Cloud. Currently, I am trying to get predictions from it, but it gives the following error:
"error": "Prediction failed: Error during model execution: AbortionError(code=StatusCode.INVALID_ARGUMENT, details=\"contents must be scalar, got shape [1]\n\t [[{{node DecodeJpeg}}]]\")"
I used the following code to build the serving model:
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import builder as saved_model_builder

input_graph = 'retrained_graph.pb'
saved_model_dir = 'my_model'

with tf.Graph().as_default() as graph:
    # Read in the export graph
    with tf.gfile.FastGFile(input_graph, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    # Define SavedModel Signature (inputs and outputs)
    in_image = graph.get_tensor_by_name('DecodeJpeg/contents:0')
    inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}
    out_classes = graph.get_tensor_by_name('final_result:0')
    outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}
    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name='tensorflow/serving/predict'
    )
    with tf.Session(graph=graph) as sess:
        # Save out the SavedModel.
        b = saved_model_builder.SavedModelBuilder(saved_model_dir)
        b.add_meta_graph_and_variables(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       signature_def_map={'serving_default': signature})
        b.save()
The code used to generate request.json:
python -c 'import base64, sys, json; img = base64.b64encode(open(sys.argv[1], "rb").read()); print json.dumps({"image_bytes": {"b64": img}})' test.jpg &> request.json
You can try:
{"instances": [{"image_bytes": {"b64": encoded_string}, "key": "0"}]}

Tensorflow: error trying to restore a model in a pb file

I'm trying to load an already trained model taken from https://github.com/tensorflow/models/tree/master/official/resnet, but when I try to load the .pb I get an error from the ParseFromString method:
import tensorflow as tf
from tensorflow.python.platform import gfile

GRAPH_PB_PATH = '../resnet_v2_fp32_savedmodel_NHWC/1538687283/saved_model.pb'
with tf.gfile.FastGFile(GRAPH_PB_PATH, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    g_in = tf.import_graph_def(graph_def, name="")
sess = tf.Session(graph=g_in)
DecodeError: Error parsing message
What am I doing wrong?
I was having a similar problem. Instead of reading the file with gfile and parsing it as a GraphDef, use the tf.saved_model.loader.load function as in this post https://stackoverflow.com/a/46547595/4637693 (a saved_model.pb holds a SavedModel proto, not a raw GraphDef, which is why ParseFromString fails):
sess = tf.Session(graph=tf.Graph())
model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], './model')
graph_def = model.graph_def
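From there, the signatures of the loaded MetaGraphDef can be printed to discover the actual input/output tensor names (a short sketch continuing from the snippet above):

for name, sig in model.signature_def.items():
    print(name,
          [t.name for t in sig.inputs.values()],
          [t.name for t in sig.outputs.values()])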

Keras model to TensorFlow Server Model

I get an error when accessing it:
{ "error": "Generic conv implementation does not support grouped convolutions for now.\n\t [[{{node model_1/conv2d_1/Conv2D}}]]" }
This is how I convert the model for TF Serving:
import tensorflow as tf

tf.keras.backend.set_learning_phase(0)
model = tf.keras.models.load_model(r'model.h5')
export_path = 'my_image_classifier/1'
with tf.keras.backend.get_session() as sess:
    tf.saved_model.simple_save(
        sess,
        export_path,
        inputs={'input_image': model.input},
        outputs={t.name: t for t in model.outputs})
What do I do? I need the model running on the server.
System: Ubuntu 18.04
TF Serving 1.12 (Docker)
Keras 1.2.4
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
from tensorflow.keras import backend as K
import tensorflow as tf

export_path = 'model'
sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(0)

json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = tf.keras.models.model_from_json(loaded_model_json)
model.load_weights('01.h5')
model.summary()

builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'input_image': model.get_layer(name='the_input').input},
                                  outputs={'out': model.get_layer(name='the_output').output})
with K.get_session() as sess:
    builder.add_meta_graph_and_variables(sess=sess,
                                         tags=[tag_constants.SERVING],
                                         signature_def_map={'predict': signature},
                                         strip_default_attrs=True)
    builder.save()
The main thing is to include strip_default_attrs=True.
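As a quick check of the export, the 'predict' signature can then be called through TensorFlow Serving's REST API (a sketch; the host/port, model name, and dummy input shape are assumptions, not taken from the question):

import requests
import numpy as np

payload = {
    "signature_name": "predict",
    # dummy batch shaped like the model input; replace with real preprocessing
    "instances": np.zeros((1, 64, 64, 3)).tolist(),
}
resp = requests.post("http://localhost:8501/v1/models/model:predict", json=payload)
print(resp.json())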

Convert a graph proto (pb/pbtxt) to a SavedModel for use in TensorFlow Serving or Cloud ML Engine

I've been following the TensorFlow for Poets 2 codelab on a model I've trained, and have created a frozen, quantized graph with embedded weights. It's captured in a single file - say my_quant_graph.pb.
Since I can use that graph for inference with the TensorFlow Android inference library just fine, I thought I could do the same with Cloud ML Engine, but it seems it only works with a SavedModel.
How can I simply convert a frozen/quantized graph in a single pb file to use on ML Engine?
It turns out that a SavedModel provides some extra info around a saved graph. Assuming the frozen graph doesn't need assets, it only needs a serving signature to be specified.
Here's the Python code I ran to convert my graph to a format that Cloud ML Engine accepted. Note I only have a single pair of input/output tensors.
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

export_dir = './saved'
graph_pb = 'my_quant_graph.pb'

builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

with tf.gfile.GFile(graph_pb, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}

with tf.Session(graph=tf.Graph()) as sess:
    # name="" is important to ensure we don't get spurious prefixing
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    inp = g.get_tensor_by_name("real_A_and_B_images:0")
    out = g.get_tensor_by_name("generator/Tanh:0")
    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"in": inp}, {"out": out})
    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)

builder.save()
Here is a sample with multiple output nodes:
# Convert ProtoBuf model to saved_model, the format for TF Serving
# https://cloud.google.com/ai-platform/prediction/docs/exporting-savedmodel-for-prediction
import shutil
import tensorflow.compat.v1 as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

export_dir = './1'  # TF Serving supports running different versions of the same model, so we put the current model in the '1' folder.
graph_pb = 'frozen_inference_graph.pb'

# Clear out the folder
shutil.rmtree(export_dir, ignore_errors=True)

builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

with tf.io.gfile.GFile(graph_pb, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}

with tf.Session(graph=tf.Graph()) as sess:
    # Prepare inputs and outputs of the model
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    image_tensor = g.get_tensor_by_name("image_tensor:0")
    num_detections = g.get_tensor_by_name("num_detections:0")
    detection_scores = g.get_tensor_by_name("detection_scores:0")
    detection_boxes = g.get_tensor_by_name("detection_boxes:0")
    detection_classes = g.get_tensor_by_name("detection_classes:0")
    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"input_image": image_tensor},
            {"num_detections": num_detections,
             "detection_scores": detection_scores,
             "detection_boxes": detection_boxes,
             "detection_classes": detection_classes})
    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)

builder.save()
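To confirm the exported signature, the model can be loaded back in TF2 eager mode (a small sketch; it assumes the './1' directory produced above):

import tensorflow as tf

loaded = tf.saved_model.load('./1')
print(list(loaded.signatures.keys()))       # expected: ['serving_default']
infer = loaded.signatures['serving_default']
print(infer.structured_outputs)             # the detection outputs defined above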