I'm trying to convert TensorFlow weights to TensorFlow Lite. First, I generated a TensorFlow SavedModel and am now converting it to .tflite. I am doing this in Google Colab with the following commands:
%cd /content/tensorflow-yolov4-tflite
!python convert_tflite.py --weights ./checkpoints/yolov4-tiny-pretflite-416 --output ./checkpoints/yolov4-tiny-416.tflite
and this is the result:
`tensorflow.lite.python.convert_phase.ConverterError`: Variable constant folding is failed. Please consider using enabling `experimental_enable_resource_variables` flag in the TFLite converter object. For example, converter.experimental_enable_resource_variables = True
By the way, this is my convert_tflite.py file:
import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import cv2
from core.yolov4 import YOLOv4, YOLOv3, YOLOv3_tiny, decode
import core.utils as utils
import os
from core.config import cfg

flags.DEFINE_string('weights', './checkpoints/yolov4-416', 'path to weights file')
flags.DEFINE_string('output', './checkpoints/yolov4-416-fp32.tflite', 'path to output')
flags.DEFINE_integer('input_size', 416, 'path to output')
flags.DEFINE_string('quantize_mode', 'float32', 'quantize mode (int8, float16, float32)')
flags.DEFINE_string('dataset', "/Volumes/Elements/data/coco_dataset/coco/5k.txt", 'path to dataset')

def representative_data_gen():
    fimage = open(FLAGS.dataset).read().split()
    for input_value in range(10):
        if os.path.exists(fimage[input_value]):
            original_image = cv2.imread(fimage[input_value])
            original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
            image_data = utils.image_preprocess(np.copy(original_image), [FLAGS.input_size, FLAGS.input_size])
            img_in = image_data[np.newaxis, ...].astype(np.float32)
            print("calibration image {}".format(fimage[input_value]))
            yield [img_in]
        else:
            continue

def save_tflite():
    converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.weights)
    if FLAGS.quantize_mode == 'float16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.compat.v1.lite.constants.FLOAT16]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
    elif FLAGS.quantize_mode == 'int8':
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
        converter.representative_dataset = representative_data_gen

    tflite_model = converter.convert()
    open(FLAGS.output, 'wb').write(tflite_model)
    logging.info("model saved to: {}".format(FLAGS.output))

def demo():
    interpreter = tf.lite.Interpreter(model_path=FLAGS.output)
    interpreter.allocate_tensors()
    logging.info('tflite model loaded')

    input_details = interpreter.get_input_details()
    print(input_details)
    output_details = interpreter.get_output_details()
    print(output_details)

    input_shape = input_details[0]['shape']
    input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
    print(output_data)

def main(_argv):
    save_tflite()
    demo()

if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        pass
I saved this file in the tensorflow-yolov4-tflite folder.
Does anyone know how to solve this problem? Thank you.
Add these two lines before tflite_model = converter.convert() in the save_tflite() function, like this:
converter.experimental_enable_resource_variables = True
converter.experimental_new_converter = True
tflite_model = converter.convert()
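For reference, this is roughly how the end of save_tflite() looks with those two flags added (a sketch based on the code above; the float16/int8 quantize_mode handling stays unchanged):
def save_tflite():
    converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.weights)
    # ... float16 / int8 quantize_mode handling as before ...
    # enable resource-variable support and the new MLIR-based converter
    converter.experimental_enable_resource_variables = True
    converter.experimental_new_converter = True
    tflite_model = converter.convert()
    open(FLAGS.output, 'wb').write(tflite_model)
    logging.info("model saved to: {}".format(FLAGS.output))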
Related
The TFGPT2LMHeadModel conversion to TFLite produces unexpected input and output shapes, as opposed to the pre-trained model gpt2-64.tflite. How can we fix this?
!wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-64.tflite
import numpy as np
import tensorflow as tf
tflite_model_path = 'gpt2-64.tflite'
# Load the TFLite model and allocate tensors
interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
interpreter.allocate_tensors()
# Get input and output tensors
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
#print the output
input_data = np.array(np.random.random_sample((input_shape)), dtype=np.int32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data.shape)
print(input_shape)
This gives the output:
>(1, 64, 50257)
> [ 1 64]
which is as expected
But when we try to convert TFGPT2LMHeadModel to TFLite ourselves, we get a different output, as shown below:
import tensorflow as tf
from transformers import TFGPT2LMHeadModel
import numpy as np
model = TFGPT2LMHeadModel.from_pretrained('gpt2') # or 'distilgpt2'
input_spec = tf.TensorSpec([1, 64], tf.int32)
model._set_inputs(input_spec, training=False)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
# For FP16 quantization:
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_types = [tf.float16]
tflite_model = converter.convert()
open("gpt2-64-2.tflite", "wb").write(tflite_model)
tflite_model_path = 'gpt2-64-2.tflite'
# Load the TFLite model and allocate tensors
interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
interpreter.allocate_tensors()
# Get input and output tensors
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
#print the output
input_data = np.array(np.random.random_sample((input_shape)), dtype=np.int32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data.shape)
print(input_shape)
Output:
>(2, 1, 12, 1, 64)
>[1 1]
How can we fix this?
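One direction that may be worth trying (an untested sketch, not a confirmed fix): instead of relying on _set_inputs, wrap the model in a tf.function with a fixed [1, 64] input signature and convert the resulting concrete function, the same pattern used in later answers in this thread. The wrapper function, its argument name, and the output file name below are all illustrative:
import tensorflow as tf
from transformers import TFGPT2LMHeadModel

model = TFGPT2LMHeadModel.from_pretrained('gpt2')

# Hypothetical wrapper: expose only the logits through a fixed-shape signature.
# On older transformers versions the call may return a tuple instead of an
# object with a .logits attribute.
@tf.function(input_signature=[tf.TensorSpec([1, 64], tf.int32, name="input_ids")])
def serving(input_ids):
    outputs = model(input_ids)
    return outputs.logits  # expected shape (1, 64, 50257)

converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [serving.get_concrete_function()], model)
tflite_model = converter.convert()
open("gpt2-64-sig.tflite", "wb").write(tflite_model)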
I am trying to take a simple Keras model with an Add operation, convert it to TFLite, and then to EdgeTPU.
Quantization to int8 needs to take place, but depending on the conversion parameters provided, it results in either an unsupported operation FlexAddV2, an unsupported data type int32, or an error on AddV2: Error code: ERROR_NEEDS_FLEX_OPS.
The model and conversion are relatively simple and straightforward:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import random

def representative_dataset():
    for _ in range(100):
        #data = random.randint(0, 1)
        #yield [data]
        data = np.random.rand(32)*2
        yield [data.astype(np.int8)]

input = keras.Input(shape=(32,), name="dummy_input", dtype=tf.int8)
output = tf.add(input, 1)
model = keras.Model(inputs=input, outputs=output)

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS_INT8,  # enable TensorFlow Lite int8 ops.
    tf.lite.OpsSet.TFLITE_BUILTINS,       # enable TensorFlow Lite ops.
    tf.lite.OpsSet.SELECT_TF_OPS          # enable TensorFlow ops.
]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.int8  # or tf.uint8
converter.inference_output_type = tf.int8  # or tf.uint8
converter.experimental_new_quantizer = True  # It will enable conversion and quantization of MLIR ops
converter.experimental_new_converter = False
tflite_quant_model = converter.convert()
Output from running the conversion:
Traceback (most recent call last):
File "/home/gsosnow/doc/gt2tf.py", line 27, in
tflite_quant_model = converter.convert()
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 929, in wrapper
return self._convert_and_export_metrics(convert_func, *args, **kwargs)
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 908, in _convert_and_export_metrics
result = convert_func(self, *args, **kwargs)
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 1338, in convert
saved_model_convert_result = self._convert_as_saved_model()
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 1320, in _convert_as_saved_model
return super(TFLiteKerasModelConverterV2,
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 1131, in convert
result = _convert_graphdef(
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert_phase.py", line 212, in wrapper
raise converter_error from None # Re-throws the exception.
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert_phase.py", line 205, in wrapper
return func(*args, **kwargs)
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert.py", line 794, in convert_graphdef
data = convert(
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert.py", line 311, in convert
raise converter_error
tensorflow.lite.python.convert_phase.ConverterError: /home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/python/saved_model/save.py:1325:0: error: 'tf.AddV2' op is neither a custom op nor a flex op
:0: note: loc(fused["PartitionedCall:", "PartitionedCall"]): called from
/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/python/saved_model/save.py:1325:0: note: Error code: ERROR_NEEDS_FLEX_OPS
:0: error: failed while converting: 'main':
Some ops are not supported by the native TFLite runtime, you can enable TF kernels fallback using TF Select. See instructions: https://www.tensorflow.org/lite/guide/ops_select
TF Select ops: AddV2
Details:
tf.AddV2(tensor<?x32xi8>, tensor) -> (tensor<?x32xi8>) : {device = ""}
This was resolved here:
https://github.com/google-coral/edgetpu/issues/655
Here is the Python conversion code to accomplish this:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import random

def representative_dataset():
    for _ in range(100):
        #data = random.randint(0, 1)
        #yield [data]
        data = np.random.rand(32)*2
        yield [data.astype(np.float32)]

input = keras.Input(shape=(32,), name="dummy_input", dtype=tf.float32)
output = tf.add(input, 1)
# output = tf.keras.layers.Add()([input, input])
model = keras.Model(inputs=input, outputs=output)
print(model.summary())

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.int8  # or tf.uint8
converter.inference_output_type = tf.int8  # or tf.uint8
tflite_quant_model = converter.convert()
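As a quick sanity check after conversion, you can load the quantized model into the TFLite interpreter and confirm that the input and output tensors are int8 (a small usage sketch; the file name is illustrative):
# write the quantized model out and inspect its input/output dtypes
with open("add_int8.tflite", "wb") as f:
    f.write(tflite_quant_model)

interpreter = tf.lite.Interpreter(model_path="add_int8.tflite")
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['dtype'])   # expected: <class 'numpy.int8'>
print(interpreter.get_output_details()[0]['dtype'])  # expected: <class 'numpy.int8'>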
I'm trying to run a super resolution model on a Coral TPU. After integer-only quantization, I noticed strong color degradation or a gray overcast over the SR images, even though they do look sharper than the bicubic upscaled ones. Can anyone give me some tips on how to improve it? A code example is pasted below. All images are from a super resolution training dataset. An example is linked here.
Top: SR image, Bottom: bicubic upscaled image
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
print(tf.__version__)

# set up representative dataset for quantization
rep_path = r'C:\Users\xxx\Downloads\BSR_bsds500\BSR\BSDS500\data\images'
rep_ds = tf.keras.utils.image_dataset_from_directory(
    rep_path,
    seed=123,
    image_size=(256, 256),
    batch_size=1)

def representative_data_gen():
    for image_batch, _ in rep_ds:
        yield [image_batch]

# load tf SR model and convert to tf lite with quantization.
model = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")
concrete_func = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

@tf.function(input_signature=[tf.TensorSpec(shape=[1, 256, 256, 3], dtype=tf.float32)])
def f(input):
    return concrete_func(input);

converter = tf.lite.TFLiteConverter.from_concrete_functions([f.get_concrete_function()], model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant = converter.convert()

with tf.io.gfile.GFile('ESRGAN_quant.tflite', 'wb') as f:
    f.write(tflite_model_quant)

# run inference for a test image
test_img_path = r'C:\Users\xxx\Downloads\24004.jpg'
lr = tf.io.read_file(test_img_path)
lr = tf.image.decode_jpeg(lr)
lr = tf.expand_dims(lr, axis=0)
lr = tf.cast(lr, tf.uint8)

# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path='.\ESRGAN_quant.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'], lr)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])

sr = tf.squeeze(output_data, axis=0)
sr = tf.clip_by_value(sr, 0, 255)
sr = tf.round(sr)
sr = tf.cast(sr, tf.uint8)

plt.figure(figsize=(30, 30))
plt.subplot(2, 1, 1)
plt.title(f'ESRGAN (x4)')
plt.imshow(sr.numpy());
bicubic = tf.image.resize(lr, [1024, 1024], tf.image.ResizeMethod.BICUBIC)
bicubic = tf.cast(bicubic, tf.uint8)
plt.subplot(2, 1, 2)
plt.title('Bicubic')
plt.imshow(bicubic.numpy()[0]);
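One diagnostic step that may help narrow down the gray cast (a sketch, not a confirmed fix): inspect the quantization parameters the converter chose for the input and output tensors during calibration. If the output scale and zero point only cover part of the 0-255 range, the de-quantized pixels will be compressed toward gray, which would point to the representative dataset not covering the model's real output range:
# Inspect the quantization parameters chosen during calibration (diagnostic only)
in_scale, in_zero = input_details[0]['quantization']
out_scale, out_zero = output_details[0]['quantization']
print("input : scale={}, zero_point={}".format(in_scale, in_zero))
print("output: scale={}, zero_point={}".format(out_scale, out_zero))
# For a uint8 output, the representable range is roughly
# [(0 - zero_point) * scale, (255 - zero_point) * scale]; if this is much
# narrower than 0-255, the calibration images likely did not exercise the
# model's full output range.
print("output range:", (0 - out_zero) * out_scale, (255 - out_zero) * out_scale)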
I have a saved TensorFlow model, like the models in the model zoo.
I want to convert it to TensorFlow Lite. I found the following approach on the TensorFlow GitHub (my TensorFlow version is 2):
!wget http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz
# extract the downloaded file
!tar -xzvf ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.tar.gz
!pip install tf-nightly
import tensorflow as tf
converter = tf.lite.TFLiteConverter.from_saved_model('ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.experimental_new_converter = True
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
open("m.tflite", "wb").write(tflite_model)
But the output and input shapes of the converted model don't match the original model; see the following:
Original Model Input & Output shape
Converted Model Input & Output shape
So there is a problem here: the input/output shapes should match the original model!
Any idea?
From the TensorFlow GitHub issues, I used their answer to solve my problem.
Link
Their approach:
!pip install tf-nightly
import tensorflow as tf
## TFLite Conversion
model = tf.saved_model.load("saved_model")
concrete_func = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
concrete_func.inputs[0].set_shape([1, 300, 300, 3])
tf.saved_model.save(model, "saved_model_updated", signatures={"serving_default":concrete_func})
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir='saved_model_updated', signature_keys=['serving_default'])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
## TFLite Interpreter to check input shape
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test the model on random input data.
input_shape = input_details[0]['shape']
print(input_shape)
[ 1 300 300 3]
Thank you MeghnaNatraj
The shapes of both models' inputs and outputs should be the same, as shown below.
If the model is already in saved_model format, use the code below:
# if you are using same model
export_dir = 'ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model'
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
If your model is in Keras format, use the code below:
# if it's a keras model
model = tf.keras.applications.MobileNetV2(weights="imagenet", input_shape= (224, 224, 3))
converter = tf.lite.TFLiteConverter.from_keras_model(model)
In both cases, the intention is to get the converter.
I don't have the saved_model, so I will use a Keras model, convert it to saved_model format, and use that as the example:
import pathlib #to use path
model = tf.keras.applications.MobileNetV2(weights="imagenet", input_shape= (224, 224, 3))
export_dir = 'imagenet/saved_model'
tf.saved_model.save(model, export_dir) #convert keras to saved model
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT] #you can also optimize for size or latency OPTIMIZE_FOR_SIZE, OPTIMIZE_FOR_LATENCY
tflite_model = converter.convert()
#save the model
tflite_model_file = pathlib.Path('m.tflite')
tflite_model_file.write_bytes(tflite_model)
tflite_interpreter = tf.lite.Interpreter(model_path= 'm.tflite') #you can load the content with model_content=tflite_model
# get shape of tflite input and output
input_details = tflite_interpreter.get_input_details()
output_details = tflite_interpreter.get_output_details()
print("Input: {}".format( input_details[0]['shape']))
print("Output:{}".format(output_details[0]['shape']))
# get shape of the origin model
print("Input: {}".format( model.input.shape))
print("Output: {}".format(model.output.shape))
For the TFLite model, I get this:
For the original model, I get this:
You will see that the shapes of both the TFLite and Keras models are the same.
Just reshape your input tensor.
You can use the resize_tensor_input function, like this:
interpreter.resize_tensor_input(input_index=0, tensor_size=[1, 640, 640, 3])
Now your input shape will be: [1, 640, 640, 3].
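As a usage sketch (assuming the 640x640 detection model from the question), resize the input before allocating tensors, then allocate and run as usual:
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="m.tflite")
# resize the input tensor first, then allocate tensors for the new shape
interpreter.resize_tensor_input(input_index=0, tensor_size=[1, 640, 640, 3])
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
print(input_details[0]['shape'])  # expected: [1 640 640 3]

# feed a dummy input of the new shape to verify invocation works
dummy = np.zeros([1, 640, 640, 3], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()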
We have trained a model using Microsoft's Custom Vision. When we try to convert the .pb to .tflite, we run into errors. This is the Python code we use:
import tensorflow as tf
from tensorflow import data
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python import ops
from tensorflow.tools.graph_transforms import TransformGraph

graph_def_file = 'model.pb'
input_arrays = ['Placeholder']
output_arrays = ['model_outputs']

transforms = [
    'remove_nodes(op=Identity)',
    'merge_duplicate_nodes',
    'strip_unused_nodes',
    'fold_constants(ignore_errors=true)',
    'fold_batch_norms'
]

def get_graph_def_from_file(graph_filepath):
    with ops.Graph().as_default():
        with tf.gfile.GFile(graph_filepath, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            return graph_def

graph_def = get_graph_def_from_file(graph_def_file)
optimized_graph_def = TransformGraph(graph_def, input_arrays, output_arrays, transforms)
tf.train.write_graph(optimized_graph_def, logdir='', as_text=False, name='optimized_model.pb')

optimized_graph_def_file = 'optimized_model.pb'
converter = tf.lite.TFLiteConverter.from_frozen_graph(optimized_graph_def_file,
                                                      input_arrays,
                                                      output_arrays)
converter.target_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,  # We also tried with TF_OPS only and TFLITE_BUILTINS only
                        tf.lite.OpsSet.SELECT_TF_OPS]
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
open('optimized_converted_model_2.tflite', 'wb').write(tflite_model)
This is the input of the network, and it does a convolution fairly early.
As you can see, TensorFlow does not appear to recognize this operation: