CNN Cannot Identify Image File - tensorflow

I have made a simple CNN to recognize three types of fish. I am trying to use the CNN to classify an image that was not included in the training or validation sets. The image is grunts-saltwater.jpg and is stored on Google Drive. Here is the code for predicting with the existing CNN model:
grunts_url = "https://drive.google.com/file/d/1zuA6T_0a9mOUvNWHQ1OACPLaZMCtbIZd/view?usp=sharing"
grunts_path = tf.keras.utils.get_file('grunts-saltwater', origin=grunts_url)
img = keras.preprocessing.image.load_img(
    grunts_path, target_size=(img_height, img_width))
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)  # Create a batch

predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])

print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
However, I get the following error:
Downloading data from https://drive.google.com/file/d/1zuA6T_0a9mOUvNWHQ1OACPLaZMCtbIZd/view?usp=sharing
8192/Unknown - 0s 0us/step
---------------------------------------------------------------------------
UnidentifiedImageError Traceback (most recent call last)
<ipython-input-217-d031443047e1> in <module>()
3
4 img = keras.preprocessing.image.load_img(
----> 5 grunts_path, target_size=(img_height, img_width))
6
7 img_array = keras.preprocessing.image.img_to_array(img)
2 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/preprocessing/image.py in load_img(path, grayscale, color_mode, target_size, interpolation)
299 """
300 return image.load_img(path, grayscale=grayscale, color_mode=color_mode,
--> 301 target_size=target_size, interpolation=interpolation)
302
303
/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/utils.py in load_img(path, grayscale, color_mode, target_size, interpolation)
112 'The use of `load_img` requires PIL.')
113 with open(path, 'rb') as f:
--> 114 img = pil_image.open(io.BytesIO(f.read()))
115 if color_mode == 'grayscale':
116 # if image is not already an 8-bit, 16-bit or 32-bit grayscale image
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in open(fp, mode)
2860 warnings.warn(message)
2861 raise UnidentifiedImageError(
-> 2862 "cannot identify image file %r" % (filename if filename else fp)
2863 )
2864
UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7f9002c637d8>
Can you help with the issue, please? Thanks.

It is because the Google Drive link to the image is a view link rather than a downloadable link. If the file is shared, you can convert it into a direct-download link that HTTP clients can fetch.
To convert it to a downloadable link:
Your shared image ID is 1zuA6T_0a9mOUvNWHQ1OACPLaZMCtbIZd, so use the link "https://docs.google.com/uc?export=download&id=1zuA6T_0a9mOUvNWHQ1OACPLaZMCtbIZd"
Fixed code:
grunts_url = "https://docs.google.com/uc?export=download&id=1zuA6T_0a9mOUvNWHQ1OACPLaZMCtbIZd"
grunts_path = tf.keras.utils.get_file('grunts-saltwater', origin=grunts_url)
img = keras.preprocessing.image.load_img(grunts_path, target_size=(100, 100))
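More generally, you can derive the download form from any shared view link. Here is a minimal helper (a sketch; it assumes the standard .../file/d/<id>/view?... URL layout and a file shared as "anyone with the link"):

def drive_view_to_download(view_url):
    # Pull the file ID out of a ".../file/d/<id>/view?..." style URL.
    file_id = view_url.split('/d/')[1].split('/')[0]
    return 'https://docs.google.com/uc?export=download&id=' + file_id

grunts_url = drive_view_to_download(
    "https://drive.google.com/file/d/1zuA6T_0a9mOUvNWHQ1OACPLaZMCtbIZd/view?usp=sharing")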

I managed to resolve the issue. I do not think I was referring to the source URL correctly. Here is an example that worked:
gruntfish_url = "https://upload.wikimedia.org/wikipedia/commons/5/54/Blue_Stripe_Grunt._Haemulon_sciurus.jpg"
gruntfish_path = tf.keras.utils.get_file('Grunt.', origin=gruntfish_url)

Related

Problem with 'Unknown image file format' error for GCS image in Tensorflow style transfer demo

I want to use my own images with this TensorFlow Style Transfer demo, which I've copied to my own Colab notebook:
https://www.tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization
I have images stored in a GCS bucket but have been getting image format errors. To test this, I took one of the images from the TensorFlow demo, downloaded it, put it in my GCS bucket, added the link to the "Let's try it on more images" section of my demo code, and got the same file format error I was previously getting with my own images.
Here's where I've inserted the GCS version of the image:
content_urls = dict(
    tueblingen02='https://storage.cloud.google.com/01_bucket-02/Tuebingen_Neckarfront-vox.jpeg',
    sea_turtle='https://upload.wikimedia.org/wikipedia/commons/d/d7/Green_Sea_Turtle_grazing_seagrass.jpg',
    tuebingen='https://upload.wikimedia.org/wikipedia/commons/0/00/Tuebingen_Neckarfront.jpg',
    grace_hopper='https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg',
)
style_urls = dict(
    kanagawa_great_wave='https://upload.wikimedia.org/wikipedia/commons/0/0a/The_Great_Wave_off_Kanagawa.jpg',
    kandinsky_composition_7='https://upload.wikimedia.org/wikipedia/commons/b/b4/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg',
    # etc ...
)
The resulting error message:
InvalidArgumentError: Unknown image file format. One of JPEG, PNG, GIF, BMP required. [Op:DecodeImage]
Full message:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-16-3ded16359898> in <module>()
26 content_image_size = 384
27 style_image_size = 256
---> 28 content_images = {k: load_image(v, (content_image_size, content_image_size)) for k, v in content_urls.items()}
29 style_images = {k: load_image(v, (style_image_size, style_image_size)) for k, v in style_urls.items()}
30 style_images = {k: tf.nn.avg_pool(style_image, ksize=[3,3], strides=[1,1], padding='SAME') for k, style_image in style_images.items()}
3 frames
<ipython-input-16-3ded16359898> in <dictcomp>(.0)
26 content_image_size = 384
27 style_image_size = 256
---> 28 content_images = {k: load_image(v, (content_image_size, content_image_size)) for k, v in content_urls.items()}
29 style_images = {k: load_image(v, (style_image_size, style_image_size)) for k, v in style_urls.items()}
30 style_images = {k: tf.nn.avg_pool(style_image, ksize=[3,3], strides=[1,1], padding='SAME') for k, style_image in style_images.items()}
<ipython-input-2-1485a3082999> in load_image(image_url, image_size, preserve_aspect_ratio)
19 img = tf.io.decode_image(
20 tf.io.read_file(image_path),
---> 21 channels=3, dtype=tf.float32)[tf.newaxis, ...]
22 img = crop_center(img)
23 img = tf.image.resize(img, image_size, preserve_aspect_ratio=True)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
7184 def raise_from_not_ok_status(e, name):
7185 e.message += (" name: " + name if name is not None else "")
-> 7186 raise core._status_to_exception(e) from None # pylint: disable=protected-access
7187
7188
InvalidArgumentError: Unknown image file format. One of JPEG, PNG, GIF, BMP required. [Op:DecodeImage]
So that's confusing me: the image works fine when it's hosted elsewhere, which leads me to believe it's not an image format issue but something else.
I'd greatly appreciate any input or suggestions on what might be happening here.
Thanks.
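One way to narrow this down (a debugging sketch, not a fix): fetch the URL directly and look at the first few bytes. A JPEG begins with b'\xff\xd8'; if you instead see b'<!DOCTYPE' or b'<html', the URL is returning an HTML page (storage.cloud.google.com links, for example, can redirect unauthenticated clients to a sign-in page) rather than the image itself:

import urllib.request

url = 'https://storage.cloud.google.com/01_bucket-02/Tuebingen_Neckarfront-vox.jpeg'
with urllib.request.urlopen(url) as resp:
    head = resp.read(16)
print(head)  # JPEG data starts with b'\xff\xd8'; HTML output means no image was served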

train image classification models with colab

I followed the template and changed the link, but it doesn't work:
https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb#scrollTo=3jz5x0JoskPv
This is my datasets
https://firebasestorage.googleapis.com/v0/b/lol-fypproject.appspot.com/o/lol.tgz?alt=media&token=d07b81bd-442f-4ebe-920e-3772598fbb20
Original code:
image_path = tf.keras.utils.get_file(
    'flower_photos.tgz',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    extract=True)
image_path = os.path.join(os.path.dirname(image_path), 'flower_photos')
I changed it to this:
image_path = tf.keras.utils.get_file(
    'lol.tgz',
    'https://firebasestorage.googleapis.com/v0/b/lol-fypproject.appspot.com/o/lol.tgz?alt=media&token=d07b81bd-442f-4ebe-920e-3772598fbb20',
    extract=True)
image_path = os.path.join(os.path.dirname(image_path), 'lol')
This is the line that fails, with the error message shown below:
data = ImageClassifierDataLoader.from_folder(image_path)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-15-a5e7646aca55> in <module>()
----> 1 data = ImageClassifierDataLoader.from_folder(image_path)
2 train_data, test_data = data.split(0.9)
/usr/local/lib/python3.7/dist-packages/tensorflow_examples/lite/model_maker/core/data_util/image_dataloader.py in from_folder(cls, filename, shuffle)
69 all_image_size = len(all_image_paths)
70 if all_image_size == 0:
---> 71 raise ValueError('Image size is zero')
72
73 if shuffle:
ValueError: Image size is zero
I have found the problem: the folder structure inside the zip file does not match the structure of the sample.
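One quick way to check that (a sketch; the archive name and the expected layout are taken from the post) is to list what lol.tgz actually contains and compare it with the flower_photos layout, which is one sub-folder per class under a single root folder:

import tarfile

# Expected layout, mirroring flower_photos: lol/<class_name>/<image files>
with tarfile.open('lol.tgz', 'r:gz') as tar:
    for name in tar.getnames()[:20]:  # print the first entries to inspect the structure
        print(name)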

How to use TensorFlow lite on a raspberry pi 4 without keras?

Basically, I want to convert this code snippet to code that opens a .tflite model and does not use Keras. I cannot install Keras on my Raspberry Pi 4, as it needs TensorFlow 2+.
model = keras.models.load_model(saved_model_path)
image_url = tf.keras.utils.get_file('Court', origin='https://squashvideo.site/share/court3.jpg')
img = tf.keras.preprocessing.image.load_img(image_url, target_size=(224, 224))
os.remove(image_url)  # Remove the cached file

img_array = tf.keras.preprocessing.image.img_to_array(img)
prediction_scores = model.predict(np.expand_dims(img_array, axis=0) / 255)
score = tf.nn.softmax(prediction_scores[0])
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
Here's what I have tried which gives the error below:
from PIL import Image

def classify_image(interpreter, image, top_k=1):
    tensor_index = interpreter.get_input_details()[0]['index']
    input_tensor = interpreter.tensor(tensor_index)()[0]
    input_tensor[:, :] = image

    interpreter.invoke()
    output_details = interpreter.get_output_details()[0]
    output = np.squeeze(interpreter.get_tensor(output_details['index']))

    scale, zero_point = output_details['quantization']
    output = scale * (output - zero_point)

    ordered = np.argpartition(-output, top_k)
    return [(i, output[i]) for i in ordered[:top_k]][0]

interpreter = Interpreter('/var/www/html/share/AI/court.tflite')
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
print("Image Shape (", width, ",", height, ")")

data_folder = "/var/www/html/share/"
image = Image.open(data_folder + "court1.jpg").convert('RGB').resize((width, height))
label_id, prob = classify_image(interpreter, image)
Running gives the error:
squash#court1:/var/www/html/share/AI $ python3 test.py
Image Shape ( 224 , 224 )
Traceback (most recent call last):
File "test.py", line 44, in <module>
label_id, prob = classify_image(interpreter, image)
File "test.py", line 22, in classify_image
interpreter.invoke()
File "/home/squash/.local/lib/python3.7/site-packages/tflite_runtime/interpreter.py", line 539, in invoke
self._ensure_safe()
File "/home/squash/.local/lib/python3.7/site-packages/tflite_runtime/interpreter.py", line 287, in _ensure_safe
data access.""")
RuntimeError: There is at least 1 reference to internal data
in the interpreter in the form of a numpy array or slice. Be sure to
only hold the function returned from tensor() if you are using raw
data access.
The error is in the way you are feeding data to the tflite Interpreter here:
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
The Image.open function returns an Image object. You need to convert it into a numpy array before feeding it to a tensor. And you should use:
interpreter.set_tensor(0, image_data)
to set the data instead of the assignment above.
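A minimal sketch of that suggestion (the float32 dtype and /255 scaling are assumptions; check get_input_details() for the dtype and shape your model actually expects):

import numpy as np
from PIL import Image
from tflite_runtime.interpreter import Interpreter

interpreter = Interpreter('/var/www/html/share/AI/court.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]

img = Image.open('/var/www/html/share/court1.jpg').convert('RGB').resize((224, 224))
# Convert the PIL Image to a batched numpy array before handing it to the interpreter.
image_data = np.expand_dims(np.array(img, dtype=np.float32) / 255.0, axis=0)

interpreter.set_tensor(input_details['index'], image_data)
interpreter.invoke()

output_details = interpreter.get_output_details()[0]
output = np.squeeze(interpreter.get_tensor(output_details['index']))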
Think I fixed it by doing this:
img = Image.open(image_url).convert('RGB').resize((224, 224))
img_array = np.array(img, dtype=np.float32)

probs_lite = lite_model(np.expand_dims(img_array, axis=0) / 255)[0]
print(probs_lite)
print(np.argmax(probs_lite))

score = tf.nn.softmax(probs_lite)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
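Note that lite_model is not defined in the snippet above. A common pattern (an assumption here, not necessarily the poster's actual code) is a thin callable wrapper around the TFLite interpreter:

def lite_model(images):
    # Assumes a single float32 input tensor and a single output tensor.
    interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
                           images.astype(np.float32))
    interpreter.invoke()
    return interpreter.get_tensor(interpreter.get_output_details()[0]['index'])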

How to correctly use plot_model?

I can't plot the following model in Google Colab
def build_model(tasks):
    img_input = Input(shape=(23, 23, 1), name="image")
    coords_input = Input(shape=(4,), name="coords")  # first two cartesian, then spectral

    x = Conv2D(filters=32, kernel_size=7, activation='relu',
               kernel_initializer='GlorotUniform', bias_initializer='GlorotUniform')(img_input)
    x = ZeroPadding2D(padding=((0, 1), (0, 1)))(x)  # padding to mimic caffe behaviour
    x = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)
    # ...
    # some more layers
    # ...
    x = Dense(units=512)(x)
    x = BatchNormalization(axis=-1, scale=False)(x)
    x = ReLU()(x)
    x = Dropout(rate=0.5)(x)

    outputs = []
    for i in range(1, tasks + 1):
        if tasks == 5:
            temp = Dense(units=2)(x)
            outputs.append(Softmax(name=output_names[i])(temp))
        else:
            outputs.append(Dense(units=2)(x))

    return Model(inputs=[img_input, coords_input], outputs=outputs)
model.summary() works but when I plot the model using tf.keras.utils.plot_model(model,to_file="model.png"), it is giving the following error:
---------------------------------------------------------------------------
AttributeError                Traceback (most recent call last)
<ipython-input> in <module>()
      3 tf.keras.utils.plot_model(
      4     model,
----> 5     to_file="model.png"
      6 )
1 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/vis_utils.py in plot_model(model, to_file, show_shapes, show_layer_names, rankdir, expand_nested, dpi)
    281       rankdir=rankdir,
    282       expand_nested=expand_nested,
--> 283       dpi=dpi)
    284   if dot is None:
    285     return
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/vis_utils.py in model_to_dot(model, show_shapes, show_layer_names, rankdir, expand_nested, dpi, subgraph)
    141
    142   # Append a wrapped layer's label to node's label, if it exists.
--> 143   layer_name = layer.name
    144   class_name = layer.__class__.__name__
    145
AttributeError: 'dict' object has no attribute 'name'
Could you please explain where I am wrong?
I had this issue too.
Try checking
len(model.layers)
len(model._layers)
If len(model._layers) is 1 more (or maybe more than 1) than len(model.layers), print model._layers and model.layers to the terminal:
print(model._layers)
print(model.layers)
I expect you will see a DictWrapper at the end of model._layers and not at the end of model.layers. If this is the case, you can remove this DictWrapper from model._layers by
model._layers = model._layers[:-1].
You should now be able to use plot_model(model).
Note, while this should allow you to use plot_model, I assume there are some potential side effects of just removing a layer TensorFlow inserted. Thus, you might want to do something like (I didn't test the following):
orig_layers = model._layers
model._layers = model._layers[:-1]
plot_model(model)
model._layers = orig_layers
TensorFlow issue #38988 ("AttributeError: 'dict' object has no attribute 'name'") discusses this problem and mentions the following solution of removing any layers which are of type dict. This solution seems a little problematic if for some reason you are ever supposed to have a layer that is a dict, but maybe this doesn't happen in practice.
Here is a solution from the link
model._layers = [layer for layer in model._layers if not isinstance(layer, dict)]

Modify and combine two different frozen graphs generated using tensorflow object detection API for inference

I am working with the TensorFlow object detection API. I have trained two different models (SSD-mobilenet and FRCNN-inception-v2) for my use case. Currently, my workflow is like this:

1. Take an input image and detect one particular object using SSD-mobilenet.
2. Crop the input image with the bounding box generated from step 1 and then resize it to a fixed size (e.g. 200 x 300).
3. Feed this cropped and resized image to FRCNN-inception-v2 for detecting smaller objects inside the ROI.

Currently, at inference time, when I load two separate frozen graphs and follow the steps, I am getting my desired results. But I need only a single frozen graph because of my deployment requirement. I am new to TensorFlow and want to combine both graphs, with the crop-and-resize step in between them.
Thanks @matt and @Vedanshu for responding. Here is the updated code that works fine for my requirement. Please give suggestions if it needs any improvement, as I am still learning.
# Dependencies
import tensorflow as tf
import numpy as np

# load a graph from a pb file path
def load_graph(pb_file):
    graph = tf.Graph()
    with graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_file, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    return graph

# returns a tensor dictionary from a graph
def get_inference(graph, count=0):
    with graph.as_default():
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in ['num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks', 'image_tensor']:
            tensor_name = key + ':0' if count == 0 else key + '_{}:0'.format(count)
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
    return tensor_dict

# renames while_context because there is one while function for every graph
# open issue at https://github.com/tensorflow/tensorflow/issues/22162
def rename_frame_name(graphdef, suffix):
    for n in graphdef.node:
        if "while" in n.name:
            if "frame_name" in n.attr:
                n.attr["frame_name"].s = str(n.attr["frame_name"]).replace(
                    "while_context", "while_context" + suffix).encode('utf-8')

if __name__ == '__main__':
    # your pb file paths
    frozenGraphPath1 = '...replace_with_your_path/some_frozen_graph.pb'
    frozenGraphPath2 = '...replace_with_your_path/some_frozen_graph.pb'
    # new file name to save the combined model
    combinedFrozenGraph = 'combined_frozen_inference_graph.pb'

    # load both graphs
    graph1 = load_graph(frozenGraphPath1)
    graph2 = load_graph(frozenGraphPath2)

    # get tensor names from the first graph
    tensor_dict1 = get_inference(graph1)

    with graph1.as_default():
        # get the tensors needed to add the crop and resize step
        image_tensor = tensor_dict1['image_tensor']
        scores = tensor_dict1['detection_scores'][0]
        num_detections = tf.cast(tensor_dict1['num_detections'][0], tf.int32)
        detection_boxes = tensor_dict1['detection_boxes'][0]

        # I had to add NMS because my SSD model outputs 100 detections and
        # hence runs out of memory because of the huge tensor shape
        selected_indices = tf.image.non_max_suppression(
            detection_boxes, scores, 5, iou_threshold=0.5)
        selected_boxes = tf.gather(detection_boxes, selected_indices)

        # intermediate crop and resize step, whose output is the input for the second model (FRCNN)
        cropped_img = tf.image.crop_and_resize(
            image_tensor,
            selected_boxes,
            tf.zeros(tf.shape(selected_indices), dtype=tf.int32),
            [300, 60]  # resize to 300 x 60
        )
        cropped_img = tf.cast(cropped_img, tf.uint8, name='cropped_img')

    gdef1 = graph1.as_graph_def()
    gdef2 = graph2.as_graph_def()

    g1name = "graph1"
    g2name = "graph2"

    # rename while_context in both graphs
    rename_frame_name(gdef1, g1name)
    rename_frame_name(gdef2, g2name)

    # combine both models and save them as one
    with tf.Graph().as_default() as g_combined:
        x, y = tf.import_graph_def(gdef1, return_elements=['image_tensor:0', 'cropped_img:0'])
        z, = tf.import_graph_def(gdef2, input_map={"image_tensor:0": y},
                                 return_elements=['detection_boxes:0'])
        tf.train.write_graph(g_combined, "./", combinedFrozenGraph, as_text=False)
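For anyone reading along, here is a minimal sketch of running the combined graph afterwards (TF 1.x; the 'import'/'import_1' prefixes come from the two default-named import_graph_def calls above, and the dummy input shape is arbitrary):

with tf.Graph().as_default() as g:
    with tf.gfile.GFile(combinedFrozenGraph, 'rb') as f:
        gdef = tf.GraphDef()
        gdef.ParseFromString(f.read())
    tf.import_graph_def(gdef, name='')

with tf.Session(graph=g) as sess:
    dummy_image = np.zeros((1, 480, 640, 3), dtype=np.uint8)  # placeholder test input
    boxes = sess.run('import_1/detection_boxes:0',
                     feed_dict={'import/image_tensor:0': dummy_image})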
You can feed the output of one graph into another using input_map in import_graph_def. You also have to rename the while_context, because there is one while function for every graph. Something like this:
def get_frozen_graph(graph_file):
    """Read a frozen graph file from disk."""
    with tf.gfile.GFile(graph_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def

def rename_frame_name(graphdef, suffix):
    # Bug reported at https://github.com/tensorflow/tensorflow/issues/22162#issuecomment-428091121
    for n in graphdef.node:
        if "while" in n.name:
            if "frame_name" in n.attr:
                n.attr["frame_name"].s = str(n.attr["frame_name"]).replace(
                    "while_context", "while_context" + suffix).encode('utf-8')

...

l1_graph = tf.Graph()
with l1_graph.as_default():
    trt_graph1 = get_frozen_graph(pb_fname1)
    [tf_input1, tf_scores1, tf_boxes1, tf_classes1, tf_num_detections1] = tf.import_graph_def(
        trt_graph1,
        return_elements=['image_tensor:0', 'detection_scores:0', 'detection_boxes:0',
                         'detection_classes:0', 'num_detections:0'])

    input1 = tf.identity(tf_input1, name="l1_input")
    boxes1 = tf.identity(tf_boxes1[0], name="l1_boxes")  # index by 0 to remove batch dimension
    scores1 = tf.identity(tf_scores1[0], name="l1_scores")
    classes1 = tf.identity(tf_classes1[0], name="l1_classes")
    num_detections1 = tf.identity(tf.dtypes.cast(tf_num_detections1[0], tf.int32),
                                  name="l1_num_detections")

...
# Make your output tensor
tf_out = ...  # your output tensor (here: crop the input image with the bounding box generated
              # from step 1, then resize it to a fixed size, e.g. 200 x 300)
...

connected_graph = tf.Graph()
with connected_graph.as_default():
    l1_graph_def = l1_graph.as_graph_def()
    g1name = 'ved'
    rename_frame_name(l1_graph_def, g1name)
    tf.import_graph_def(l1_graph_def, name=g1name)

    ...

    trt_graph2 = get_frozen_graph(pb_fname2)
    g2name = 'level2'
    rename_frame_name(trt_graph2, g2name)
    [tf_scores, tf_boxes, tf_classes, tf_num_detections] = tf.import_graph_def(
        trt_graph2,
        input_map={'image_tensor': tf_out},
        return_elements=['detection_scores:0', 'detection_boxes:0',
                         'detection_classes:0', 'num_detections:0'])

#######
# Export the graph

with connected_graph.as_default():
    print('\nSaving...')
    cwd = os.getcwd()
    path = os.path.join(cwd, 'saved_model')
    shutil.rmtree(path, ignore_errors=True)

    inputs_dict = {
        "image_tensor": tf_input
    }
    outputs_dict = {
        "detection_boxes_l1": tf_boxes_l1,
        "detection_scores_l1": tf_scores_l1,
        "detection_classes_l1": tf_classes_l1,
        "max_num_detection": tf_max_num_detection,
        "detection_boxes_l2": tf_boxes_l2,
        "detection_scores_l2": tf_scores_l2,
        "detection_classes_l2": tf_classes_l2
    }
    tf.saved_model.simple_save(
        tf_sess_main, path, inputs_dict, outputs_dict
    )
    print('Ok')