How to import a model from a .pb file - TensorFlow 2.0

import tensorflow as tf
from tensorflow.python.platform import gfile

with tf.Session() as sess:
    model_filename = "./model/skipGram-word2Vec/saved_model.pb"
    with gfile.FastGFile(model_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        result = tf.import_graph_def(graph_def, name='')
        print(sess.run(result))
Then the following error occurred:
DecodeError: Wrong wire type in tag.

Here is how you can load the model from a .pb file in TensorFlow 2.0:
import tensorflow as tf

GRAPH_PB_PATH = './frozen_model.pb'
with tf.compat.v1.Session() as sess:
    print("load graph")
    with tf.io.gfile.GFile(GRAPH_PB_PATH, 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
        graph_nodes = [n for n in graph_def.node]
        names = []
        for t in graph_nodes:
            names.append(t.name)
        print(names)
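Note that the file in the original question is a saved_model.pb, which stores a SavedModel protocol buffer rather than a bare GraphDef; parsing it as a GraphDef is what raises the wire-type DecodeError. A minimal sketch of loading it as a SavedModel instead in TF 2.x (assuming './model/skipGram-word2Vec' is a complete export directory with its variables/ folder):

import tensorflow as tf

# Load the whole SavedModel directory, not just the .pb file inside it
loaded = tf.saved_model.load('./model/skipGram-word2Vec')
print(list(loaded.signatures.keys()))  # e.g. ['serving_default']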

Related

Convert a frozen model (.pb) to SavedModel

Recently I tried to convert a model (tf1.x) to a saved_model, following the official migration document. However, in my use case most of the models I have, or those from the TensorFlow model zoo, are .pb files, and the official document says:
There is no straightforward way to upgrade a raw Graph.pb file to TensorFlow 2.0, but if you have a "Frozen graph" (a tf.Graph where the variables have been turned into constants), then it is possible to convert this to a concrete_function using v1.wrap_function:
But I still do not understand how to convert it to the saved_model format.
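For reference, the v1.wrap_function approach that the document points to looks roughly like this (a sketch adapted from the official migration guide; 'x:0' and 'out:0' are placeholder tensor names, not names from my model):

import tensorflow as tf

def wrap_frozen_graph(graph_def, inputs, outputs):
    # Import the frozen GraphDef inside a wrapped v1-style function
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")
    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    # Prune to a concrete function from the named inputs to the named outputs
    return wrapped_import.prune(
        tf.nest.map_structure(import_graph.as_graph_element, inputs),
        tf.nest.map_structure(import_graph.as_graph_element, outputs))

# concrete_func = wrap_frozen_graph(graph_def, inputs='x:0', outputs='out:0')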
In TF1 mode:
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

def convert_pb_to_server_model(pb_model_path, export_dir, input_name='input:0', output_name='output:0'):
    graph_def = read_pb_model(pb_model_path)
    convert_pb_saved_model(graph_def, export_dir, input_name, output_name)

def read_pb_model(pb_model_path):
    with tf.gfile.GFile(pb_model_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        return graph_def

def convert_pb_saved_model(graph_def, export_dir, input_name='input:0', output_name='output:0'):
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    sigs = {}
    with tf.Session(graph=tf.Graph()) as sess:
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()
        inp = g.get_tensor_by_name(input_name)
        out = g.get_tensor_by_name(output_name)
        sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
            tf.saved_model.signature_def_utils.predict_signature_def(
                {"input": inp}, {"output": out})
        builder.add_meta_graph_and_variables(sess,
                                             [tag_constants.SERVING],
                                             signature_def_map=sigs)
        builder.save()
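Hypothetical usage of the converter above, assuming the frozen graph's input and output tensors are named 'input:0' and 'output:0' (substitute the real names from your graph):

convert_pb_to_server_model('./frozen_model.pb', './export_dir',
                           input_name='input:0', output_name='output:0')

You can then verify the export from the shell with saved_model_cli show --dir ./export_dir --all.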
In TF2 mode:
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph
from tensorflow.lite.python.util import run_graph_optimizations, get_grappler_config
import numpy as np

def frozen_keras_graph(func_model):
    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(func_model)
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.resource
    ]
    output_tensors = frozen_func.outputs
    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(["constfold", "function"]),
        graph=frozen_func.graph)
    return graph_def

def convert_keras_model_to_pb():
    keras_model = train_model()
    func_model = tf.function(keras_model).get_concrete_function(
        tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
    graph_def = frozen_keras_graph(func_model)
    tf.io.write_graph(graph_def, '/tmp/tf_model3', 'frozen_graph.pb')

def convert_saved_model_to_pb():
    model_dir = '/tmp/saved_model'
    model = tf.saved_model.load(model_dir)
    func_model = model.signatures["serving_default"]
    graph_def = frozen_keras_graph(func_model)
    tf.io.write_graph(graph_def, '/tmp/tf_model3', 'frozen_graph.pb')
Or:
def convert_saved_model_to_pb(output_node_names, input_saved_model_dir, output_graph_dir):
    from tensorflow.python.tools import freeze_graph
    output_node_names = ','.join(output_node_names)
    freeze_graph.freeze_graph(input_graph=None, input_saver=None,
                              input_binary=None,
                              input_checkpoint=None,
                              output_node_names=output_node_names,
                              restore_op_name=None,
                              filename_tensor_name=None,
                              output_graph=output_graph_dir,
                              clear_devices=None,
                              initializer_nodes=None,
                              input_saved_model_dir=input_saved_model_dir)

def save_output_tensor_to_pb():
    output_names = ['StatefulPartitionedCall']
    save_pb_model_path = '/tmp/pb_model/freeze_graph.pb'
    model_dir = '/tmp/saved_model'
    convert_saved_model_to_pb(output_names, model_dir, save_pb_model_path)
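If you are not sure which output node names to pass (for example, whether it really is 'StatefulPartitionedCall'), a quick way to inspect a SavedModel's serving signature first (a sketch, assuming a 'serving_default' signature exists):

import tensorflow as tf

model = tf.saved_model.load('/tmp/saved_model')
sig = model.signatures['serving_default']
print(sig.structured_input_signature)  # input tensor specs
print(sig.structured_outputs)          # output tensor names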
To check whether my understanding is correct, I also post what I learned:
If anyone wants to migrate from tf1.x to tf2.x, please follow the official post first.
In TensorFlow 2.0, tf.train.Saver and freeze_graph have been replaced by saved_model.
And if anyone wants to convert a pb model from tf1.x into a saved_model, you can follow #Boluoyu's answer. But if your runtime environment is tf2.0 or above, you can use the following code:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

# Note: get_tensor_by_name expects tensor names like 'input:0', not bare
# operation names, so the defaults include the ':0' suffix.
def convert_pb_to_server_model(pb_model_path, export_dir, input_name='input:0', output_name='output:0'):
    graph_def = read_pb_model(pb_model_path)
    convert_pb_saved_model(graph_def, export_dir, input_name, output_name)

def read_pb_model(pb_model_path):
    with tf.gfile.GFile(pb_model_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        return graph_def

def convert_pb_saved_model(graph_def, export_dir, input_name='input:0', output_name='output:0'):
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    sigs = {}
    with tf.Session(graph=tf.Graph()) as sess:
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()
        inp = g.get_tensor_by_name(input_name)
        out = g.get_tensor_by_name(output_name)
        sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
            tf.saved_model.signature_def_utils.predict_signature_def(
                {"input": inp}, {"output": out})
        builder.add_meta_graph_and_variables(sess,
                                             [tag_constants.SERVING],
                                             signature_def_map=sigs)
        builder.save()
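Hypothetical usage, again assuming the frozen graph's tensors are named 'input:0' and 'output:0' (check with the inspection snippet above if unsure):

convert_pb_to_server_model('./frozen_model.pb', './export_dir',
                           input_name='input:0', output_name='output:0')
# Then verify the signature from the shell:
#   saved_model_cli show --dir ./export_dir --tag_set serve --signature_def serving_default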

Can I get a Keras model from a .pb file?

I have a protocol buffer file for a VGG model. I need to generate the Keras model. Can I use the .pb file to generate a Keras model so that I can use Keras features like summary?
I loaded the .pb file in TF and generated the model configs and layer weights.
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.framework import tensor_util

GRAPH_PB_PATH = './small_vgg_tf_graph.pb'
with tf.Session() as sess:
    print("load graph")
    with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
        graph_nodes = [n for n in graph_def.node]
        names = []
        for t in graph_nodes:
            names.append(t.name)
        with open('names.txt', 'w') as fh:
            for i in names:
                fh.write(str(i) + '\n')
        weight_nodes = [n for n in graph_def.node if n.op == 'Const']
        with open('weights.txt', 'w') as fh:
            for n in weight_nodes:
                fh.write("Name of the node - %s" % n.name + '\n')
                fh.write("Value - ")
                fh.write(str(tensor_util.MakeNdarray(n.attr['value'].tensor)) + '\n')
        with open('ops.txt', 'w') as fh:
            for op in sess.graph.get_operations():
                fh.write(str(op) + '\n')
I want to generate a Keras-like summary file.
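A frozen GraphDef does not carry enough structure to rebuild a Keras model directly, but you can approximate a summary by tallying parameters from the Const nodes already collected above. A rough sketch, reusing weight_nodes and tensor_util from the snippet:

total_params = 0
for n in weight_nodes:
    arr = tensor_util.MakeNdarray(n.attr['value'].tensor)
    print('%-60s shape=%s params=%d' % (n.name, arr.shape, arr.size))
    total_params += arr.size
print('Total parameters: %d' % total_params)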

How can I know the output and input tensor names in a saved model

I know how to load a saved TensorFlow model, but how will I know the input and output tensor names?
I can load a protobuf file using tf.import_graph_def and then load the tensors using the function get_tensor_by_name, but how will I know the tensor names of any pre-trained model? Do I need to check their documentation, or is there another way?
Assuming that the input and output tensors are placeholders, something like this should be helpful for you:
X = np.ones((1,3), dtype=np.float32)
tf.reset_default_graph()
model_saver = tf.train.Saver(defer_build=True)
input_pl = tf.placeholder(tf.float32, shape=[1,3], name="Input")
w = tf.Variable(tf.random_normal([3,3], stddev=0.01), name="Weight")
b = tf.Variable(tf.zeros([3]), name="Bias")
output = tf.add(tf.matmul(input_pl, w), b)
model_saver.build()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
model_saver.save(sess, "./model.ckpt")
Now that the graph is built and saved, we can see the placeholder names like this:
model_loader = tf.train.Saver()
sess = tf.Session()
model_loader.restore(sess, "./model.ckpt")
placeholders = [x for x in tf.get_default_graph().get_operations() if x.type == "Placeholder"]
# [<tf.Operation 'Input' type=Placeholder>]
You can check the names and the input list for each operation in your graph to find the names of the tensors.
with tf.gfile.GFile(input_model_filepath, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def)

for op in graph.get_operations():
    print(op.name, [inp for inp in op.inputs])
Solution only for inputs:
# read pb into graph_def
with tf.gfile.GFile(input_model_filepath, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

# import graph_def
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def)

# print operations
for op in graph.get_operations():
    if op.type == "Placeholder":
        print(op.name)
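There is no Placeholder-like marker for outputs, but a common heuristic is to look for operations whose output tensors feed no other operation. A sketch under that assumption, reusing the graph imported above:

# collect every tensor that is consumed by some operation
consumed = set()
for op in graph.get_operations():
    for inp in op.inputs:
        consumed.add(inp.name)

# tensors nobody consumes are candidate outputs
for op in graph.get_operations():
    for out in op.outputs:
        if out.name not in consumed:
            print("possible output:", out.name)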

Tensorflow: error trying to restore a model in a pb file

I'm trying to load an already trained model taken from https://github.com/tensorflow/models/tree/master/official/resnet, but when I try to load the .pb I get an error from the ParseFromString method:
import tensorflow as tf
from tensorflow.python.platform import gfile

GRAPH_PB_PATH = '../resnet_v2_fp32_savedmodel_NHWC/1538687283/saved_model.pb'
with tf.gfile.FastGFile(GRAPH_PB_PATH, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    g_in = tf.import_graph_def(graph_def, name="")
sess = tf.Session(graph=g_in)
DecodeError: Error parsing message
What am I doing wrong?
I was having a similar problem; instead of using gfile, I used the tf.saved_model.loader.load function, as in https://stackoverflow.com/a/46547595/4637693:
sess = tf.Session(graph=tf.Graph())
model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], './model')
graph_def = model.graph_def
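As a follow-up, the MetaGraphDef returned by loader.load also exposes the serving signature, which lists the input and output tensor names you can feed and fetch (a sketch, assuming the model was exported with a 'serving_default' signature):

sig = model.signature_def['serving_default']
print({k: v.name for k, v in sig.inputs.items()})   # input tensor names
print({k: v.name for k, v in sig.outputs.items()})  # output tensor names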

Tensorflow. Could not open Checkpoint

I have built and trained a TensorFlow model, but unfortunately the checkpoint file cannot be opened, as shown by the error below.
Now there isn't an error, just a bunch of warnings that don't really tell you anything.
This happens when I run the evaluation code:
import tensorflow as tf
import main
import Process
import Input

eval_dir = "/Users/Zanhuang/Desktop/NNP"
checkpoint_dir = "/Users/Zanhuang/Desktop/NNP"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)

def evaluate():
    with tf.Graph().as_default() as g:
        images, labels = Process.eval_inputs()
        forward_propagation_results = Process.forward_propagation(images)
        init_op = tf.initialize_all_variables()
        saver = tf.train.Saver()
        top_k_op = tf.nn.in_top_k(forward_propagation_results, labels, 1)
        with tf.Session(graph=g) as sess:
            tf.train.start_queue_runners(sess=sess)
            sess.run(init_op)
            saver.restore(sess, eval_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            for i in range(100):
                print(sess.run(top_k_op))

def main(argv=None):
    evaluate()

if __name__ == '__main__':
    tf.app.run()
Here is how I generated the checkpoint file:
if step % 2 == 0:
    checkpoint_path = os.path.join(FLAGS.data_dir, 'model.ckpt')
    saver.save(sess, checkpoint_path, global_step=step)
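One thing worth noting in the evaluation code above: saver.restore(sess, eval_dir) passes a bare directory, but restore expects a checkpoint path prefix such as '/Users/Zanhuang/Desktop/NNP/model.ckpt-42'. A hedged sketch of the usual pattern:

# Resolve the newest checkpoint file in the directory, then restore from it
ckpt_path = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt_path:
    saver.restore(sess, ckpt_path)
else:
    print('No checkpoint found in', checkpoint_dir)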