How to feed TensorFlow an image from a URL?

The five commented-out lines below should work but do not. The prediction score is nowhere near what I would expect, and when I do plt.imshow(img) it shows the wrong image. Here is the link to my notebook in Colab.
x, y = next(valid_generator)
image = x[0, :, :, :]
true_index = np.argmax(y[0])
plt.imshow(image)
image_url = 'https://mysite_example/share/court3.jpg'
image_url = tf.keras.utils.get_file('Court', origin=image_url )
#img = keras.preprocessing.image.load_img( image_url, target_size=( 224, 224 ) )
#img_array = keras.preprocessing.image.img_to_array(img)
#img_array = tf.expand_dims(img_array, 0)
#prediction_scores = model.predict(np.expand_dims(img_array, axis=0))
#plt.imshow(img)
# Expand the validation image to (1, 224, 224, 3) before predicting the label
prediction_scores = model.predict(np.expand_dims(image, axis=0))
predicted_index = np.argmax(prediction_scores)
print("True label: " + get_class_string_from_index(true_index))
print("Predicted label: " + get_class_string_from_index(predicted_index)

The method tf.keras.utils.get_file downloads the file from the URL to the local cache only if the file is not already cached. So if you use the same cache name for all the URLs ("Court" in your code?), you will only ever see the first downloaded file.
Also, during training you have a preprocessing step that normalizes all pixels by dividing them by 255. You have to apply the same preprocessing step during inference as well.
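For illustration, here is a minimal sketch covering both points: it derives a unique cache name from each URL (so get_file never returns a stale file) and rescales pixels by 1/255 before predicting. The helper name is mine, and model is assumed to be your trained Keras model:
import hashlib
import numpy as np
import tensorflow as tf

def predict_from_url(model, url):
    # A unique cache name per URL avoids get_file returning a stale cached file
    fname = hashlib.md5(url.encode()).hexdigest() + '.jpg'
    path = tf.keras.utils.get_file(fname, origin=url)
    img = tf.keras.preprocessing.image.load_img(path, target_size=(224, 224))
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    # Same normalization as training: scale pixels to [0, 1]
    return model.predict(np.expand_dims(img_array, axis=0) / 255)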
Working Code:
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

_, axis = plt.subplots(1, 3)
for i, image_url in enumerate(['https://squashvideo.site/share/court3.jpg',
                               'https://i.pinimg.com/originals/0f/c2/9b/0fc29b35532f8e2fb998f5605212ab27.jpg',
                               'https://thumbs.dreamstime.com/b/squash-court-photo-empty-30346175.jpg']):
    image_url = tf.keras.utils.get_file('Court', origin=image_url)
    img = tf.keras.preprocessing.image.load_img(image_url, target_size=(224, 224))
    os.remove(image_url)  # Remove the cached file so the next URL is downloaded again
    axis[i].imshow(img)
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    # Apply the same /255 normalization used during training
    prediction_scores = model.predict(np.expand_dims(img_array, axis=0) / 255)
    axis[i].title.set_text(np.argmax(prediction_scores, axis=1))
Output:
As you can see, the predictions are correct: the last image belongs to class 0 (empty squash court) and the second image belongs to class 1 (players playing on a squash court).

Related

How to get top k predictions for a new Image

I am using this function to predict the output for images the model has never seen:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def predictor(img, model):
    image = cv2.imread(img)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    image = np.array(image, dtype='float32') / 255.0
    plt.imshow(image)
    image = image.reshape(1, 224, 224, 3)
    clas = model.predict(image).argmax()
    name = dict_class[clas]
    print('The given image is of \nClass: {0} \nSpecies: {1}'.format(clas, name))
How do I change it if I want the top 2 (or k) predictions, i.e.:
70% chance it's a dog
15% chance it's a bear
If you are using TensorFlow + Keras and are probably doing multi-class classification, then the output of model.predict() is a tensor representing either the logits or already the probabilities (softmax on top of the logits).
I am taking this example from the docs and slightly modifying it: https://www.tensorflow.org/api_docs/python/tf/math/top_k.
import tensorflow as tf

# After the softmax, the probabilities add up to 1
network_predictions = [0.7, 0.2, 0.05, 0.05]
prediction_probabilities = tf.math.top_k(network_predictions, k=2)
top_2_scores = prediction_probabilities.values.numpy()
dict_class_entries = prediction_probabilities.indices.numpy()
And in dict_class_entries you then have the indices, ordered by descending probability, matching the scores (i.e. dict_class_entries[0] = 0, which corresponds to 0.7, and top_2_scores[0] = 0.7, etc.).
You just need to replace network_predictions with model.predict(image).
Notice I removed the argmax() in order to pass the full array of probabilities instead of only the index of the maximum score/probability.
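Applied to the question's predictor(), a sketch of a top-k report might look like this (model, image, and dict_class are assumed to come from the original code; if your model outputs logits rather than probabilities, apply the commented-out softmax first):
import tensorflow as tf

def print_top_k(model, image, dict_class, k=2):
    probs = model.predict(image)[0]  # 1D array with one score per class
    # If the model outputs logits, convert them to probabilities first:
    # probs = tf.nn.softmax(probs).numpy()
    top_k = tf.math.top_k(probs, k=k)
    for score, index in zip(top_k.values.numpy(), top_k.indices.numpy()):
        print("{:.0%} chance it's a {}".format(score, dict_class[index]))

With k=2 this prints output in the desired form, e.g. "70% chance it's a dog" followed by "15% chance it's a bear".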

How to use TensorFlow lite on a raspberry pi 4 without keras?

Basically I want to convert this code snippet to code that opens a tflite model and does not use Keras. I cannot install Keras on my Raspberry Pi 4 as it needs TensorFlow 2+.
model = keras.models.load_model(saved_model_path)
image_url = tf.keras.utils.get_file('Court', origin='https://squashvideo.site/share/court3.jpg')
img = tf.keras.preprocessing.image.load_img(image_url, target_size=(224, 224))
os.remove(image_url)  # Remove the cached file
img_array = tf.keras.preprocessing.image.img_to_array(img)
prediction_scores = model.predict(np.expand_dims(img_array, axis=0) / 255)
score = tf.nn.softmax(prediction_scores[0])
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
Here's what I have tried which gives the error below:
import numpy as np
from PIL import Image
from tflite_runtime.interpreter import Interpreter

def classify_image(interpreter, image, top_k=1):
    tensor_index = interpreter.get_input_details()[0]['index']
    input_tensor = interpreter.tensor(tensor_index)()[0]
    input_tensor[:, :] = image
    interpreter.invoke()
    output_details = interpreter.get_output_details()[0]
    output = np.squeeze(interpreter.get_tensor(output_details['index']))
    scale, zero_point = output_details['quantization']
    output = scale * (output - zero_point)
    ordered = np.argpartition(-output, top_k)
    return [(i, output[i]) for i in ordered[:top_k]][0]

interpreter = Interpreter('/var/www/html/share/AI/court.tflite')
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
print("Image Shape (", width, ",", height, ")")

data_folder = "/var/www/html/share/"
image = Image.open(data_folder + "court1.jpg").convert('RGB').resize((width, height))
label_id, prob = classify_image(interpreter, image)
Running gives the error:
squash#court1:/var/www/html/share/AI $ python3 test.py
Image Shape ( 224 , 224 )
Traceback (most recent call last):
  File "test.py", line 44, in <module>
    label_id, prob = classify_image(interpreter, image)
  File "test.py", line 22, in classify_image
    interpreter.invoke()
  File "/home/squash/.local/lib/python3.7/site-packages/tflite_runtime/interpreter.py", line 539, in invoke
    self._ensure_safe()
  File "/home/squash/.local/lib/python3.7/site-packages/tflite_runtime/interpreter.py", line 287, in _ensure_safe
    data access.""")
RuntimeError: There is at least 1 reference to internal data
in the interpreter in the form of a numpy array or slice. Be sure to
only hold the function returned from tensor() if you are using raw
data access.
The error is in the way you are feeding data to the tflite Interpreter here:
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
The Image.open function returns an Image object. You need to convert it into a numpy array before feeding it to the tensor, and you should use:
interpreter.set_tensor(0, image_data)
to set the data instead of the assignment above.
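A minimal sketch of that fix, assuming a float32 model input that expects pixels in [0, 1] (I look up the input index from the interpreter rather than hard-coding 0):
import numpy as np

input_details = interpreter.get_input_details()[0]
# Convert the PIL Image to a float32 batch of shape (1, height, width, 3)
image_data = np.expand_dims(np.asarray(image, dtype=np.float32) / 255.0, axis=0)
interpreter.set_tensor(input_details['index'], image_data)
interpreter.invoke()
output = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])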
Think I fixed it by doing this:
img = Image.open(image_url).convert('RGB').resize((224, 224))
img_array = np.array(img, dtype=np.float32)
probs_lite = lite_model(np.expand_dims(img_array, axis=0) / 255)[0]
print(probs_lite)
print(np.argmax(probs_lite))
score = tf.nn.softmax(probs_lite)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)

Tensorflow Lite gives me 0% accuracy for any image

Here is my Colab notebook which anyone should be able to run in 5 seconds. The predictions are always 0. I suspect the problem is in these two lines. Or perhaps my tflite model is corrupt or wrong?
label_id, prob = classify_image(interpreter, np.expand_dims(img_array, axis=0)/255 )
#label_id, prob = classify_image(interpreter, img )
Think I figured it out. Basically the output from the classify_image() function needs further processing to be usable. So I need to do this on the output:
label_index = np.argmax(probs_lite)
score = tf.nn.softmax(probs_lite)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[label_index], 100 * np.max(score))
)
And here's my full script in case anyone has a similar issue in the future:
import itertools, time, os, sys
import numpy as np
from PIL import Image
import tensorflow as tf

def lite_model(images):
    interpreter.allocate_tensors()
    interpreter.set_tensor(interpreter.get_input_details()[0]['index'], images)
    interpreter.invoke()
    return interpreter.get_tensor(interpreter.get_output_details()[0]['index'])

class_names = ['empty_court', 'occupied_court']

path_to_tflite = tf.keras.utils.get_file(
    "court.tflite",
    "https://mysite.site/AI/court.tflite",
    untar=False)
interpreter = tf.lite.Interpreter(path_to_tflite)
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
print("Image Shape (", width, ",", height, ")")

image_url = tf.keras.utils.get_file('Court', origin='https://mysite.site/share/court4.jpg')
img = tf.keras.preprocessing.image.load_img(image_url, target_size=(width, height))
os.remove(image_url)  # Remove the cached file
img_array = tf.keras.preprocessing.image.img_to_array(img)

probs_lite = lite_model(np.expand_dims(img_array, axis=0) / 255)[0]
print(probs_lite)
label_index = np.argmax(probs_lite)
score = tf.nn.softmax(probs_lite)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[label_index], 100 * np.max(score))
)

Modify and combine two different frozen graphs generated using tensorflow object detection API for inference

I am working with the TensorFlow Object Detection API. I have trained two different models (SSD-mobilenet and FRCNN-inception-v2) for my use case. Currently, my workflow is like this:
1. Take an input image and detect one particular object using SSD-mobilenet.
2. Crop the input image with the bounding box generated in step 1 and then resize it to a fixed size (e.g. 200 x 300).
3. Feed this cropped and resized image to FRCNN-inception-v2 for detecting smaller objects inside the ROI.
Currently, at inference time, when I load the two separate frozen graphs and follow these steps, I get my desired results. But I need a single frozen graph because of my deployment requirement. I am new to TensorFlow and want to combine both graphs, with the crop-and-resize step in between them.
Thanks @matt and @Vedanshu for responding. Here is the updated code that works for my requirement. Please give suggestions if it needs any improvement, as I am still learning.
# Dependencies
import tensorflow as tf
import numpy as np

# load graphs using pb file path
def load_graph(pb_file):
    graph = tf.Graph()
    with graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_file, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    return graph

# returns tensor dictionaries from graph
def get_inference(graph, count=0):
    with graph.as_default():
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in ['num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks', 'image_tensor']:
            tensor_name = key + ':0' if count == 0 else key + '_{}:0'.format(count)
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
        return tensor_dict

# renames while_context because there is one while function for every graph
# open issue at https://github.com/tensorflow/tensorflow/issues/22162
def rename_frame_name(graphdef, suffix):
    for n in graphdef.node:
        if "while" in n.name:
            if "frame_name" in n.attr:
                n.attr["frame_name"].s = str(n.attr["frame_name"]).replace("while_context",
                                                                           "while_context" + suffix).encode('utf-8')

if __name__ == '__main__':
    # your pb file paths
    frozenGraphPath1 = '...replace_with_your_path/some_frozen_graph.pb'
    frozenGraphPath2 = '...replace_with_your_path/some_frozen_graph.pb'
    # new file name to save combined model
    combinedFrozenGraph = 'combined_frozen_inference_graph.pb'

    # load both graphs
    graph1 = load_graph(frozenGraphPath1)
    graph2 = load_graph(frozenGraphPath2)

    # get tensor names from first graph
    tensor_dict1 = get_inference(graph1)

    with graph1.as_default():
        # getting tensors to add crop and resize step
        image_tensor = tensor_dict1['image_tensor']
        scores = tensor_dict1['detection_scores'][0]
        num_detections = tf.cast(tensor_dict1['num_detections'][0], tf.int32)
        detection_boxes = tensor_dict1['detection_boxes'][0]

        # I had to add NMS because my SSD model outputs 100 detections and hence
        # it runs out of memory because of the huge tensor shape
        selected_indices = tf.image.non_max_suppression(detection_boxes, scores, 5, iou_threshold=0.5)
        selected_boxes = tf.gather(detection_boxes, selected_indices)

        # intermediate crop and resize step, whose output is the input for the second model (FRCNN)
        cropped_img = tf.image.crop_and_resize(image_tensor,
                                               selected_boxes,
                                               tf.zeros(tf.shape(selected_indices), dtype=tf.int32),
                                               [300, 60])  # resize to 300 x 60
        cropped_img = tf.cast(cropped_img, tf.uint8, name='cropped_img')

    gdef1 = graph1.as_graph_def()
    gdef2 = graph2.as_graph_def()

    g1name = "graph1"
    g2name = "graph2"

    # renaming while_context in both graphs
    rename_frame_name(gdef1, g1name)
    rename_frame_name(gdef2, g2name)

    # combine both models and save as one
    with tf.Graph().as_default() as g_combined:
        x, y = tf.import_graph_def(gdef1, return_elements=['image_tensor:0', 'cropped_img:0'])
        z, = tf.import_graph_def(gdef2, input_map={"image_tensor:0": y}, return_elements=['detection_boxes:0'])
        tf.train.write_graph(g_combined, "./", combinedFrozenGraph, as_text=False)
You can feed the output of one graph into another using input_map in import_graph_def. You also have to rename the while_context, because there is one while function for every graph. Something like this:
def get_frozen_graph(graph_file):
    """Read Frozen Graph file from disk."""
    with tf.gfile.GFile(graph_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def

def rename_frame_name(graphdef, suffix):
    # Bug reported at https://github.com/tensorflow/tensorflow/issues/22162#issuecomment-428091121
    for n in graphdef.node:
        if "while" in n.name:
            if "frame_name" in n.attr:
                n.attr["frame_name"].s = str(n.attr["frame_name"]).replace("while_context",
                                                                           "while_context" + suffix).encode('utf-8')

...

l1_graph = tf.Graph()
with l1_graph.as_default():
    trt_graph1 = get_frozen_graph(pb_fname1)
    [tf_input1, tf_scores1, tf_boxes1, tf_classes1, tf_num_detections1] = tf.import_graph_def(trt_graph1,
        return_elements=['image_tensor:0', 'detection_scores:0', 'detection_boxes:0', 'detection_classes:0', 'num_detections:0'])
    input1 = tf.identity(tf_input1, name="l1_input")
    boxes1 = tf.identity(tf_boxes1[0], name="l1_boxes")  # index by 0 to remove batch dimension
    scores1 = tf.identity(tf_scores1[0], name="l1_scores")
    classes1 = tf.identity(tf_classes1[0], name="l1_classes")
    num_detections1 = tf.identity(tf.dtypes.cast(tf_num_detections1[0], tf.int32), name="l1_num_detections")
    ...
    # Make your output tensor: crop the input image with the bounding box generated
    # in step 1 and then resize it to a fixed size (e.g. 200 x 300)
    tf_out = ...  # your output tensor

...

connected_graph = tf.Graph()
with connected_graph.as_default():
    l1_graph_def = l1_graph.as_graph_def()
    g1name = 'ved'
    rename_frame_name(l1_graph_def, g1name)
    tf.import_graph_def(l1_graph_def, name=g1name)
    ...
    trt_graph2 = get_frozen_graph(pb_fname2)
    g2name = 'level2'
    rename_frame_name(trt_graph2, g2name)
    [tf_scores, tf_boxes, tf_classes, tf_num_detections] = tf.import_graph_def(trt_graph2,
        input_map={'image_tensor': tf_out},
        return_elements=['detection_scores:0', 'detection_boxes:0', 'detection_classes:0', 'num_detections:0'])

#######
# Export the graph

with connected_graph.as_default():
    print('\nSaving...')
    cwd = os.getcwd()
    path = os.path.join(cwd, 'saved_model')
    shutil.rmtree(path, ignore_errors=True)
    inputs_dict = {
        "image_tensor": tf_input
    }
    outputs_dict = {
        "detection_boxes_l1": tf_boxes_l1,
        "detection_scores_l1": tf_scores_l1,
        "detection_classes_l1": tf_classes_l1,
        "max_num_detection": tf_max_num_detection,
        "detection_boxes_l2": tf_boxes_l2,
        "detection_scores_l2": tf_scores_l2,
        "detection_classes_l2": tf_classes_l2
    }
    tf.saved_model.simple_save(
        tf_sess_main, path, inputs_dict, outputs_dict
    )
    print('Ok')

Tensorflow : Predict in Recurrent Neural Networks for Drawing Classification tutorial

I used the tutorial code from https://www.tensorflow.org/tutorials/recurrent_quickdraw and everything works fine until I try to make a prediction instead of just evaluating.
I wrote a new input function for prediction, based on the code in create_dataset.py:
def predict_input_fn():
    def parse_line(stroke_points):
        """Parse an ndjson line and return ink (as np array) and classname."""
        inkarray = json.loads(stroke_points)
        stroke_lengths = [len(stroke[0]) for stroke in inkarray]
        total_points = sum(stroke_lengths)
        np_ink = np.zeros((total_points, 3), dtype=np.float32)
        current_t = 0
        for stroke in inkarray:
            for i in [0, 1]:
                np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
            current_t += len(stroke[0])
            np_ink[current_t - 1, 2] = 1  # stroke_end
        # Preprocessing.
        # 1. Size normalization.
        lower = np.min(np_ink[:, 0:2], axis=0)
        upper = np.max(np_ink[:, 0:2], axis=0)
        scale = upper - lower
        scale[scale == 0] = 1
        np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
        # 2. Compute deltas.
        np_ink = np_ink[1:, 0:2] - np_ink[0:-1, 0:2]
        np_ink = np_ink[1:, :]
        features = {}
        features["ink"] = tf.train.Feature(float_list=tf.train.FloatList(value=np_ink.flatten()))
        features["shape"] = tf.train.Feature(int64_list=tf.train.Int64List(value=np_ink.shape))
        f = tf.train.Features(feature=features)
        example = tf.train.Example(features=f)
        #t = tf.constant(np_ink)
        return example

    def parse_example(example):
        """Parse a single record which is expected to be a tensorflow.Example."""
        # feature_to_type = {
        #     "ink": tf.VarLenFeature(dtype=tf.float32),
        #     "shape": tf.FixedLenFeature((0, 2), dtype=tf.int64)
        # }
        feature_to_type = {
            "ink": tf.VarLenFeature(dtype=tf.float32),
            "shape": tf.FixedLenFeature([2], dtype=tf.int64)
        }
        example_proto = example.SerializeToString()
        parsed_features = tf.parse_single_example(example_proto, feature_to_type)
        parsed_features["ink"] = tf.sparse_tensor_to_dense(parsed_features["ink"])
        #parsed_features["shape"].set_shape((2))
        return parsed_features

    example = parse_line(FLAGS.predict_input_stroke_data)
    features = parse_example(example)
    dataset = tf.data.Dataset.from_tensor_slices(features)
    # Our inputs are variable length, so pad them.
    dataset = dataset.padded_batch(FLAGS.batch_size, padded_shapes=dataset.output_shapes)
    iterator = dataset.make_one_shot_iterator()
    next_feature_batch = iterator.get_next()
    return next_feature_batch, None  # In prediction, we have no labels
I modified the existing model_fn() function and added the following at the appropriate place:
predictions = tf.argmax(logits, axis=1)
if mode == tf.estimator.ModeKeys.PREDICT:
    preds = {
        "class_index": predictions,
        "probabilities": tf.nn.softmax(logits),
        "logits": logits
    }
    return tf.estimator.EstimatorSpec(mode, predictions=preds)
However, when I call the following code:
if (FLAGS.predict_input_stroke_data != None):
    # prepare_input_tfrecord_for_prediction()
    # predict_results = estimator.predict(input_fn=get_input_fn(
    #     mode=tf.estimator.ModeKeys.PREDICT,
    #     tfrecord_pattern=FLAGS.predict_input_temp_file,
    #     batch_size=FLAGS.batch_size))
    predict_results = estimator.predict(input_fn=predict_input_fn)
    for idx, prediction in enumerate(predict_results):
        type = prediction["class_ids"][0]  # Get the predicted class (index)
        print("Prediction Type: {}\n".format(type))
I get the following error. What is wrong in my code? Could anyone please help me? I have tried quite a few things to get the shape right, but I am unable to. I also tried to first write my stroke data as a TFRecord and then use the existing input_fn to read from it; that gives me similar, but slightly different, errors.
File "/Users/farooq/.virtualenvs/tensor1.0/lib/python3.6/site-packages/tensorflow/python/framework/common_shapes.py", line 627, in call_cpp_shape_fn
require_shape_fn)
File "/Users/farooq/.virtualenvs/tensor1.0/lib/python3.6/site-packages/tensorflow/python/framework/common_shapes.py", line 691, in _call_cpp_shape_fn_impl
raise ValueError(err.message)
ValueError: Shape must be rank 2 but is rank 1 for 'Slice' (op: 'Slice') with input shapes: [?], [2], [2].
I finally solved the problem by taking my input keystrokes and writing them to disk as a TFRecord. I also had to write the same input strokes batch_size times to the same TFRecord, or else I got the shape mismatch errors. After that, invoking predict worked.
The main addition for prediction was the following function:
def create_tfrecord_for_prediction(batch_size, stoke_data, tfrecord_file):
    def parse_line(stoke_data):
        """Parse provided stroke data and return ink (as np array) and classname."""
        inkarray = json.loads(stoke_data)
        stroke_lengths = [len(stroke[0]) for stroke in inkarray]
        total_points = sum(stroke_lengths)
        np_ink = np.zeros((total_points, 3), dtype=np.float32)
        current_t = 0
        for stroke in inkarray:
            if len(stroke[0]) != len(stroke[1]):
                print("Inconsistent number of x and y coordinates.")
                return None
            for i in [0, 1]:
                np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
            current_t += len(stroke[0])
            np_ink[current_t - 1, 2] = 1  # stroke_end
        # Preprocessing.
        # 1. Size normalization.
        lower = np.min(np_ink[:, 0:2], axis=0)
        upper = np.max(np_ink[:, 0:2], axis=0)
        scale = upper - lower
        scale[scale == 0] = 1
        np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
        # 2. Compute deltas.
        #np_ink = np_ink[1:, 0:2] - np_ink[0:-1, 0:2]
        #np_ink = np_ink[1:, :]
        np_ink[1:, 0:2] -= np_ink[0:-1, 0:2]
        np_ink = np_ink[1:, :]
        features = {}
        features["ink"] = tf.train.Feature(float_list=tf.train.FloatList(value=np_ink.flatten()))
        features["shape"] = tf.train.Feature(int64_list=tf.train.Int64List(value=np_ink.shape))
        f = tf.train.Features(feature=features)
        ex = tf.train.Example(features=f)
        return ex

    if stoke_data is None:
        print("Error: Stroke data cannot be none")
        return

    example = parse_line(stoke_data)

    # Remove the file if it already exists
    if tf.gfile.Exists(tfrecord_file):
        tf.gfile.Remove(tfrecord_file)

    writer = tf.python_io.TFRecordWriter(tfrecord_file)
    # Write the same example batch_size times so the batched shapes match
    for i in range(batch_size):
        writer.write(example.SerializeToString())
    writer.flush()
    writer.close()
Then, in the main function, you just have to invoke estimator.predict(), reusing the same input_fn=get_input_fn(...) argument, except pointing it to the temporarily created tfrecord_file, as in the sketch below.
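A hedged sketch of that invocation (stroke_data is a placeholder for your raw input; FLAGS.predict_input_temp_file is assumed to hold the temporary TFRecord path, and get_input_fn is the tutorial's existing input function):
create_tfrecord_for_prediction(FLAGS.batch_size, stroke_data, FLAGS.predict_input_temp_file)
predict_results = estimator.predict(input_fn=get_input_fn(
    mode=tf.estimator.ModeKeys.PREDICT,
    tfrecord_pattern=FLAGS.predict_input_temp_file,
    batch_size=FLAGS.batch_size))
for prediction in predict_results:
    print("Predicted class index: {}".format(prediction["class_index"]))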
Hope this helps