in __getattr__ raise AttributeError(name) AttributeError: shape - numpy

I'm creating tissue masks for a set of pathology images, and one of the preparation steps is changing the black pixels to white.
My code works for a single image, but when I apply it to the image files in a directory I get the error below. I don't understand the error and don't know how to solve it.
File "/Users/sepideh/Library/CloudStorage/GoogleDrive-.../My Drive/Remove_empty_pixels/Remove_empty_pixels.py", line 108, in <module>
height, width, _ = img.shape
File "/Users/sepideh/opt/anaconda3/envs/myenv/lib/python3.9/site-packages/PIL/Image.py", line 529, in __getattr__
raise AttributeError(name)
AttributeError: shape
and this is my code:
height, width, _ = img.shape
white_px = np.asarray([255, 255, 255])
black_px = np.asarray([0, 0, 0])
img2 = np.array(img, copy=True)
for i in range(height):
    for j in range(width):
        px = img[i][j]
        if all(px == black_px):
            img2[i][j] = white_px
I want to understand the reason for this error and a solution for it.
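The traceback itself points at the cause: img here is a PIL Image (the error is raised from PIL/Image.py), and PIL images have no shape attribute; only numpy arrays do. Presumably the single-image run passed an array while the directory loop passes the Image straight from Image.open. A minimal sketch of the usual fix, assuming the files are opened with PIL (path and mode are illustrative):
import numpy as np
from PIL import Image

img = Image.open("example.png").convert("RGB")  # PIL Image: has .size, but no .shape
img = np.asarray(img)                           # numpy array: .shape is (height, width, 3)
height, width, _ = img.shape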

Related

How to use TensorFlow lite on a raspberry pi 4 without keras?

Basically, I want to convert this code snippet to code that opens a .tflite model and does not use Keras. I cannot install Keras on my Raspberry Pi 4 as it needs TensorFlow 2+.
model = keras.models.load_model( saved_model_path )
image_url = tf.keras.utils.get_file('Court', origin='https://squashvideo.site/share/court3.jpg' )
img = tf.keras.preprocessing.image.load_img(image_url, target_size=( 224, 224 ) )
os.remove(image_url) # Remove the cached file
img_array = tf.keras.preprocessing.image.img_to_array(img)
prediction_scores = model.predict(np.expand_dims(img_array, axis=0)/255)
score = tf.nn.softmax(prediction_scores[0])
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
Here's what I have tried which gives the error below:
from PIL import Image

def classify_image(interpreter, image, top_k=1):
    tensor_index = interpreter.get_input_details()[0]['index']
    input_tensor = interpreter.tensor(tensor_index)()[0]
    input_tensor[:, :] = image
    interpreter.invoke()
    output_details = interpreter.get_output_details()[0]
    output = np.squeeze(interpreter.get_tensor(output_details['index']))
    scale, zero_point = output_details['quantization']
    output = scale * (output - zero_point)
    ordered = np.argpartition(-output, top_k)
    return [(i, output[i]) for i in ordered[:top_k]][0]

interpreter = Interpreter('/var/www/html/share/AI/court.tflite')
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
print("Image Shape (", width, ",", height, ")")

data_folder = "/var/www/html/share/"
image = Image.open(data_folder + "court1.jpg").convert('RGB').resize((width, height))
label_id, prob = classify_image(interpreter, image)
Running gives the error:
squash#court1:/var/www/html/share/AI $ python3 test.py
Image Shape ( 224 , 224 )
Traceback (most recent call last):
File "test.py", line 44, in <module>
label_id, prob = classify_image(interpreter, image)
File "test.py", line 22, in classify_image
interpreter.invoke()
File "/home/squash/.local/lib/python3.7/site-packages/tflite_runtime/interpreter.py", line 539, in invoke
self._ensure_safe()
File "/home/squash/.local/lib/python3.7/site-packages/tflite_runtime/interpreter.py", line 287, in _ensure_safe
data access.""")
RuntimeError: There is at least 1 reference to internal data
in the interpreter in the form of a numpy array or slice. Be sure to
only hold the function returned from tensor() if you are using raw
data access.
The error is in the way you are feeding data to the tflite Interpreter here:
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
The Image.open function returns an Image object. You need to convert it into a numpy array before feeding it to a tensor. And you should use:
interpreter.set_tensor(0, image_data)
to set the data instead of above assignment.
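For example, a minimal sketch of that pattern, assuming a float32 model whose input is scaled to [0, 1] (the input index is read from get_input_details instead of being hard-coded):
import numpy as np

input_details = interpreter.get_input_details()[0]
# Convert the PIL Image to a float32 array and add the batch dimension.
image_data = np.expand_dims(np.asarray(image, dtype=np.float32) / 255.0, axis=0)
interpreter.set_tensor(input_details['index'], image_data)
interpreter.invoke()
output = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])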
Think I fixed it by doing this:
img = Image.open( image_url ).convert('RGB').resize((224, 224))
img_array = np.array ( img, dtype=np.float32 )
probs_lite = lite_model( np.expand_dims(img_array, axis=0)/255 )[0]
print ( probs_lite )
print (np.argmax(probs_lite))
score = tf.nn.softmax(probs_lite)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)
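(lite_model is not defined in that snippet; a hypothetical wrapper consistent with the call above, assuming the float32 interpreter already loaded and allocated, would be:)
def lite_model(images):
    # Hypothetical helper: run one batch through the TFLite interpreter.
    interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
                           np.asarray(images, dtype=np.float32))
    interpreter.invoke()
    return interpreter.get_tensor(interpreter.get_output_details()[0]['index'])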

tf.dataset, multiple path inputs, and mapping per batch to load images

I'm loading a dataset with multiple input images. The input image paths should only be decoded at batch time, in order to handle a large dataset.
The data set is N image path inputs and M float outputs. The images for each input have different resolutions.
Data is ([img_input_1.png, img_input_2.png, ...], [0.65, 0.7, 0.8])
The model is using the Keras functional api in symbolic mode.
Here is the most recently EDITED code:
from itertools import zip_longest

def read_image(path, shape):
    try:
        image = tf.io.read_file(path)
        image = tf.image.decode_png(image)
        image = tf.image.resize(image, [shape[1], shape[2]])
        image /= 255.0
        return image
    except:
        print('ERROR: preprocess_image: bad path', path)

def load_image(x, y, shp):
    pout = [(k, x[k]) for k in x.keys()]
    l1 = tf.convert_to_tensor(list(x))
    l2 = tf.convert_to_tensor(list(x.values()))
    pl = tf.map_fn(
        lambda args: (read_image(args[0], shp), args[1]),
        [l1, l2],
        dtype=(tf.float32, tf.float32)
    )
    pl = {path: (pl[0][i], pl[1][i]) for i, path in enumerate(x)}
    return (pl, y)
def dataset_prep(json_data, seq, batch_size):
    # LOAD DATA FROM JSON
    x, y = json_parse_x_y(json_data[seq])
    xx = [*zip_longest(*x)]  # NOTE: goes from variable sized input to {'input_N':...}
    yy = [*zip_longest(*y)]
    # GET SHAPES (hard coded atm)
    lns = [[len(xxx)] for xxx in xx]
    rzs = [[24,512,1],[96,512,1]]  # TEMP TODO! grab from [(v['h'],v['w'],v['c']) for v in xx]
    shp = [*zip_longest(*[lns, rzs])]
    shp = [list(s) for s in shp]
    shp = [[*itertools.chain.from_iterable(s)] for s in shp]
    xd = dict([["input_{}".format(i+1), np.array(y)] for i, y in [*enumerate(xx)]])
    yd = dict([["output_{}".format(i+1), np.array(y)] for i, y in [*enumerate(yy)]])
    ds = tf.data.Dataset.from_tensor_slices((xd, yd))
    ds = ds.shuffle(10000)
    ds = ds.repeat()
    ds = ds.map(map_func=lambda x, y: load_image(x, y, shp), num_parallel_calls=AUTOTUNE)
    ds = ds.batch(batch_size) if batch_size else ds
    ds = ds.prefetch(AUTOTUNE)
    return ds
This is the error I'm getting:
Traceback (most recent call last):
File "/home/me/.local/bin/wavfeat", line 11, in <module>
load_entry_point('wavfeat==0.1.0', 'console_scripts', 'wavfeat')()
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/__main__.py", line 91, in main
analysis_batch_sql(obj)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/analysis_run_csv.py", line 50, in analysis_batch_sql
qy = [*map(lambda c: run_elm(c[0], c[1]), ch)]
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/analysis_run_csv.py", line 50, in <lambda>
qy = [*map(lambda c: run_elm(c[0], c[1]), ch)]
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/analysis_run_csv.py", line 23, in run_elm
out = fn(input, elm)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_onset.py", line 196, in one_sec_onset_train
return train(input, elm)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_onset.py", line 182, in train
ts = dataset_prep(jd, 'train', bc)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_onset.py", line 123, in dataset_prep
ds = ds.map(map_func=lambda x,y: load_image(x, y, shp), num_parallel_calls=AUTOTUNE)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 1146, in map
self, map_func, num_parallel_calls, preserve_cardinality=True)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 3264, in __init__
use_legacy_function=use_legacy_function)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2591, in __init__
self._function = wrapper_fn._get_concrete_function_internal()
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1366, in _get_concrete_function_internal
*args, **kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1360, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1648, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1541, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py", line 716, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2585, in wrapper_fn
ret = _wrapper_helper(*args)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2530, in _wrapper_helper
ret = func(*nested_args)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_onset.py", line 123, in <lambda>
ds = ds.map(map_func=lambda x,y: load_image(x, y, shp), num_parallel_calls=AUTOTUNE)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_data_loader.py", line 91, in load_image
print("x['input_1'].values(): ", x['input_1'].values())
AttributeError: 'Tensor' object has no attribute 'values'
What am I doing that is preventing the paths from being loaded?
EDIT:
Attempting pandrey's fix, I'm getting input errors. Here is the data before from_tensor_slices and ds.map and then after:
pre_from_tensor_slices x: {'input_1': array(['/media/me/sp_data/sp_data/datasets/chr_01/one_sec_onset_11_oac-leg/7388_39216_30--id=7388__sql_table=oac_1__sql_idx=405167__pitch=30__onset=39216.png',
'/media/me/sp_data/sp_data/datasets/chr_01/one_sec_onset_11_oac-leg/2447_864_27--id=2447__sql_table=oac_1__sql_idx=415458__pitch=27__onset=864.png',
'/media/me/sp_data/sp_data/datasets/chr_01/one_sec_onset_11_oac-leg/2386_20208_38--id=2386__sql_table=oac_1__sql_idx=433248__pitch=38__onset=20208.png',
...,
'/media/me/sp_data/sp_data/datasets/chr_01/one_sec_onset_11_oac-leg/6261_24528_57--id=6261__sql_table=oac_1__sql_idx=449753__pitch=57__onset=24528.png',
'/media/me/sp_data/sp_data/datasets/chr_01/one_sec_onset_11_oac-leg/3727_22944_31--id=3727__sql_table=oac_1__sql_idx=407620__pitch=31__onset=22944.png',
'/media/me/sp_data/sp_data/datasets/chr_01/one_sec_onset_11_oac-leg/1668_7056_60--id=1668__sql_table=oac_1__sql_idx=381152__pitch=60__onset=7056.png'],
dtype='<U162'), 'input_2': array(['/media/me/sp_data/sp_data/datasets/mel_01/one_sec_onset_11_oac-leg/7388_39216_30--id=7388__sql_table=oac_1__sql_idx=405167__pitch=30__onset=39216.png',
'/media/me/sp_data/sp_data/datasets/mel_01/one_sec_onset_11_oac-leg/2447_864_27--id=2447__sql_table=oac_1__sql_idx=415458__pitch=27__onset=864.png',
'/media/me/sp_data/sp_data/datasets/mel_01/one_sec_onset_11_oac-leg/2386_20208_38--id=2386__sql_table=oac_1__sql_idx=433248__pitch=38__onset=20208.png',
...,
'/media/me/sp_data/sp_data/datasets/mel_01/one_sec_onset_11_oac-leg/6261_24528_57--id=6261__sql_table=oac_1__sql_idx=449753__pitch=57__onset=24528.png',
'/media/me/sp_data/sp_data/datasets/mel_01/one_sec_onset_11_oac-leg/3727_22944_31--id=3727__sql_table=oac_1__sql_idx=407620__pitch=31__onset=22944.png',
'/media/me/sp_data/sp_data/datasets/mel_01/one_sec_onset_11_oac-leg/1668_7056_60--id=1668__sql_table=oac_1__sql_idx=381152__pitch=60__onset=7056.png'],
dtype='<U162')}
pre_from_tensor_slices y: {'output_1': array([0.817, 0.018, 0.421, ..., 0.511, 0.478, 0.147])}
_________________________
y: {'output_1': <tf.Tensor 'args_2:0' shape=() dtype=float64>}
x: {'input_1': <tf.Tensor 'args_0:0' shape=() dtype=string>, 'input_2': <tf.Tensor 'args_1:0' shape=() dtype=string>}
x.values(): dict_values([<tf.Tensor 'args_0:0' shape=() dtype=string>, <tf.Tensor 'args_1:0' shape=() dtype=string>])
x['input_1']: Tensor("args_0:0", shape=(), dtype=string)
Running x['input_1'].values() throws an error: 'Tensor' object has no attribute 'values'
I get an error situated around map_fn
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 284, in _constant_impl
allow_broadcast=allow_broadcast))
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py", line 455, in make_tensor_proto
raise ValueError("None values not supported.")
ValueError: None values not supported.
EDIT 2
Attempting the latest I get the following error
Traceback (most recent call last):
File "/home/me/.local/bin/wavfeat", line 11, in <module>
load_entry_point('wavfeat==0.1.0', 'console_scripts', 'wavfeat')()
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/__main__.py", line 91, in main
analysis_batch_sql(obj)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/analysis_run_csv.py", line 50, in analysis_batch_sql
qy = [*map(lambda c: run_elm(c[0], c[1]), ch)]
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/analysis_run_csv.py", line 50, in <lambda>
qy = [*map(lambda c: run_elm(c[0], c[1]), ch)]
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/analysis_run_csv.py", line 23, in run_elm
out = fn(input, elm)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_onset.py", line 216, in one_sec_onset_train
return train(input, elm)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_onset.py", line 203, in train
vs = validation_prep(jd, 'validation', bc)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_onset.py", line 176, in validation_prep
ds = ds.map(map_func=load_and_preprocess_from_path_label, num_parallel_calls=AUTOTUNE)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 1146, in map
self, map_func, num_parallel_calls, preserve_cardinality=True)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 3264, in __init__
use_legacy_function=use_legacy_function)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2591, in __init__
self._function = wrapper_fn._get_concrete_function_internal()
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1366, in _get_concrete_function_internal
*args, **kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1360, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1648, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/eager/function.py", line 1541, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/framework/func_graph.py", line 716, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2585, in wrapper_fn
ret = _wrapper_helper(*args)
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2530, in _wrapper_helper
ret = func(*nested_args)
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_data_loader.py", line 47, in load_and_preprocess_from_path_label
pl = dict([(pk, tf.map_fn(load_and_preprocess_image, po, dtype=tf.float32)) for pk,po in pout])
File "/home/me/.local/lib/python3.6/site-packages/wavfeat/one_sec_data_loader.py", line 47, in <listcomp>
pl = dict([(pk, tf.map_fn(load_and_preprocess_image, po, dtype=tf.float32)) for pk,po in pout])
File "/home/me/.local/lib/python3.6/site-packages/tensorflow/python/ops/map_fn.py", line 214, in map_fn
raise ValueError("elems must be a 1+ dimensional Tensor, not a scalar")
ValueError: elems must be a 1+ dimensional Tensor, not a scalar
Add-on: not using dict structures
This is full code (save for defining json_parse_x_y and declaring AUTOTUNE) that achieves what you are attempting without using dict structures.
I tested that make_dataset works (see example below), so if you encounter an issue it should be due to a specification error regarding load_tensors.
from itertools import zip_longest
import tensorflow as tf

# additionally, `json_parse_x_y` must be defined
# and `AUTOTUNE` must be declared (in my example, I set it to 2)

def read_image(path, shape):
    """Read an image at a given filepath into a given tensor shape.

    Return a float tensor of the given shape.
    """
    try:
        image = tf.io.read_file(path)
        image = tf.image.decode_png(image)
        image = tf.image.resize(image, [shape[1], shape[2]])
        image /= 255.0
        return image
    except:
        raise FileNotFoundError("preprocess_image: bad path '%s'" % path)

def load_images(paths, shapes):
    """Load an ensemble of images (associated with a single sample).

    paths  : rank-1 string Tensor
    shapes : list of images' shapes (same length as `paths`)

    Return a tuple of float tensors containing the loaded images.
    """
    return tuple((
        read_image(paths[i], shapes[i])
        for i in range(len(shapes))
    ))
def load_tensors(json_data, seq):
    """Load image descriptors from a json dump.

    Return a tuple containing:
    * a rank-2 tensor containing lists of image paths (str)
    * a rank-2 tensor containing resolution values (float)
    * a list of image shapes, of same length as the rank-2
      tensor's second axis
    """
    x, y = json_parse_x_y(json_data[seq])
    xx = [*zip_longest(*x)]  # NOTE: goes from variable sized input to {'input_N':...}
    yy = [*zip_longest(*y)]
    # GET SHAPES (hard coded atm)
    lns = [[len(xxx)] for xxx in xx]
    rzs = [[24,512,1],[96,512,1]]  # TEMP TODO! grab from [(v['h'],v['w'],v['c']) for v in xx]
    shp = [*zip_longest(*[lns, rzs])]
    shp = [list(s) for s in shp]
    shp = [[*itertools.chain.from_iterable(s)] for s in shp]
    return (xx, yy, shp)
def make_dataset(xx, yy, shp, batch_size):
    """Build a Dataset instance containing loaded images.

    xx, yy, shp : see the specification of `load_tensors`'s outputs
    batch_size  : batch size to set on the Dataset

    Return a Dataset instance where each batched sample is a tuple
    containing two elements: first, a tuple containing N loaded images'
    rank-3 tensors; second, a rank-1 tensor containing M float values.
    (to be clear: batching adds a dimension to all those tensors)
    """
    data = tf.data.Dataset.from_tensor_slices((xx, yy))
    data = data.shuffle(10000)
    data = data.map(lambda x, y: (load_images(x, shp), y))
    data = data.repeat()
    data = data.batch(batch_size) if batch_size else data
    data = data.prefetch(AUTOTUNE)
    return data

def dataset_prep(json_data, seq, batch_size):
    """Full pipeline from json to Dataset."""
    xx, yy, shapes = load_tensors(json_data, seq)
    return make_dataset(xx, yy, shapes, batch_size)
Example, using "hand-made" values; all images are actually this classic image, of shape [512, 512, 3].
import numpy as np
import tensorflow as tf
# import previous code

# Here, N = 2, and I make 2 samples.
x = tf.convert_to_tensor(np.array([
    ['image_1a.png', 'image_1b.png'],
    ['image_2a.png', 'image_2b.png']
]))
shapes = [[1, 512, 512], [1, 512, 512]]  # images are initially [512, 512, 3]
# Here, M = 3, and I make 2 samples. Values are purely random.
y = tf.convert_to_tensor(np.array([
    [.087, .92, .276],
    [.242, .37, .205]
]))
# This should work.
data = make_dataset(x, y, shapes, batch_size=1)
# Output signature is <PrefetchDataset shapes:
# (((None, 512, 512, None), (None, 512, 512, None)), (None, 3)),
# types: ((tf.float32, tf.float32), tf.float64)
# >
# Where the first None is actually `batch_size`
# and the second is, in this case, 3.
Answer to the current question:
Okay, the problem you are now encountering is that the revised load_image function does not fit the specifications of the Dataset, hence the exception. Please find below a fully edited version of the code that seems to work (I ran a test using custom images on my computer, with the xd / yd dicts initialized to look like your reported x and y in-dataset tensors). It is not pretty, and I would personally advise dropping the dict structures, but it works:
from itertools import zip_longest

def read_image(path, shape):
    try:
        image = tf.io.read_file(path)
        image = tf.image.decode_png(image)
        image = tf.image.resize(image, [shape[1], shape[2]])
        image /= 255.0
        return image
    except:
        raise FileNotFoundError("preprocess_image: bad path '%s'" % path)

# CHANGED: load_image is actually useless

def dataset_prep(json_data, seq, batch_size):
    # LOAD DATA FROM JSON
    x, y = json_parse_x_y(json_data[seq])
    xx = [*zip_longest(*x)]  # NOTE: goes from variable sized input to {'input_N':...}
    yy = [*zip_longest(*y)]
    # GET SHAPES (hard coded atm)
    lns = [[len(xxx)] for xxx in xx]
    rzs = [[24,512,1],[96,512,1]]  # TEMP TODO! grab from [(v['h'],v['w'],v['c']) for v in xx]
    shp = [*zip_longest(*[lns, rzs])]
    shp = [list(s) for s in shp]
    shp = [[*itertools.chain.from_iterable(s)] for s in shp]
    xd = dict([["input_{}".format(i+1), np.array(y)] for i, y in [*enumerate(xx)]])
    yd = dict([["output_{}".format(i+1), np.array(y)] for i, y in [*enumerate(yy)]])
    ds = tf.data.Dataset.from_tensor_slices((xd, yd))
    ds = ds.shuffle(10000)
    # CHANGED: the following lines run the image import (also moved the repeat instruction later)
    ds = ds.map(
        lambda x, y: (
            {key: read_image(path, shp[i]) for i, (key, path) in enumerate(x.items())},
            y
        ),
        num_parallel_calls=AUTOTUNE
    )
    ds = ds.repeat()
    ds = ds.batch(batch_size) if batch_size else ds
    ds = ds.prefetch(AUTOTUNE)
    return ds
Initial answer (before question edit):
I will only deal with the exception raised by load_image in this answer, but there might be additional work to perform on the rest; I did not test for that, not having a convenient dataset at hand.
The exception message is actually quite explicit: you are passing a scalar element (e.g. n in [(k, tf.map_fn(lambda x: read_image(x, shp), n, dtype=tf.float32)) for k,n in pout]) as the elems argument to tf.map_fn, when it expects a tensor (or a (possibly nested) list or tuple of tensors), as clearly specified in its documentation.
You are also using tf.map_fn the wrong way in the quoted line of code: you are mixing it up with a Python list comprehension, when you should use one or the other.
With a list comprehension (also replacing the useless previous lines of the load_image function):
pl = {path: (load_image(path, shp), res) for path, res in x.items()}
With tf.map_fn:
# Read all images, return two tensors, one with images, the other with resolutions.
# (so, resolutions inclusion in this is actually useless and should be redesigned)
pl = tf.map_fn(
    lambda args: (read_image(args[0], shp), args[1]),
    [tf.convert_to_tensor(list(x)), tf.convert_to_tensor(list(x.values()))],
    dtype=(tf.float32, tf.float32)
)
# If you really, really want to return a dict, but is it an optimal design?
pl = {path: (pl[0][i], pl[1][i]) for i, path in enumerate(x)}
I do not know whether returning a dict specified in this way is optimal (or even compatible) with Dataset instantiation; however, if the rest of your code is working, this should do the trick.
At any rate, if you want to iterate over a dict, go ahead and use either the first version or a modified version of the second one (which may have the advantage of parallelizing images reading).
I hope this helps :-)

decode_jpeg, encode_jpeg type error

I am adapting the following code to my own needs:
https://github.com/tensorflow/models/blob/master/slim/datasets/download_and_convert_flowers.py
I need to add zero-padding and resize the images to 299x299 (the Inception V3 input size).
I am doing this by adding some lines of code, changing the original
image_data = tf.gfile.FastGFile(filenames[i], 'r').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
    image_data, 'jpg', height, width, class_id)
with this
image_data = tf.gfile.FastGFile(filenames[i], 'r').read()
height, width = image_reader.read_image_dims(sess, image_data)
image_decoded = tf.image.decode_jpeg(image_data, channels=None, ratio=None, fancy_upscaling=None, try_recover_truncated=None, acceptable_fraction=None, name=None)
M=max(width,height)
image_decoded = tf.image.pad_to_bounding_box(image_decoded, int(math.floor((M-height)/2)), int(math.floor((M-width)/2)), M, M)
image_decoded = tf.expand_dims(image_decoded, 0)
image_decoded = tf.image.resize_bilinear(image_decoded, [299, 299], align_corners=None, name=None)
image_decoded = tf.squeeze(image_decoded)
image_decoded = tf.bitcast(image_decoded, tf.uint8)
image_data = tf.image.encode_jpeg(image_decoded)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(image_data, b'jpg', height, width, class_id)
I get the following error
File "convert_dataset.py", line 236, in <module>
tf.app.run()
File "/home/franco/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 44, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "convert_dataset.py", line 233, in main
run(FLAGS.dataset_dir)
File "convert_dataset.py", line 217, in run
dataset_dir)
File "convert_dataset.py", line 165, in _convert_dataset
example = dataset_utils.image_to_tfexample(image_data, b'jpg', height, width, class_id)
File "/home/franco/Desktop/dataset_originario/dataset/dataset_utils.py", line 58, in image_to_tfexample
'image/encoded': bytes_feature(image_data),
File "/home/franco/Desktop/dataset_originario/dataset/dataset_utils.py", line 53, in bytes_feature
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
File "/home/franco/tensorflow/lib/python2.7/site-packages/google/protobuf/internal/python_message.py", line 508, in init
copy.extend(field_value)
File "/home/franco/tensorflow/lib/python2.7/site-packages/google/protobuf/internal/containers.py", line 275, in extend
new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter]
File "/home/franco/tensorflow/lib/python2.7/site-packages/google/protobuf/internal/type_checkers.py", line 109, in CheckValue
raise TypeError(message)
TypeError: <tf.Tensor 'EncodeJpeg:0' shape=() dtype=string> has type <class 'tensorflow.python.framework.ops.Tensor'>, but expected one of: ((<type 'str'>,),)
I have only found this open issue
https://github.com/tensorflow/models/issues/726
Maybe there is something else wrong in my code.
I added .eval() at this step: image_data = tf.image.encode_jpeg(image_decoded)
It now looks like this:
image_data = tf.image.encode_jpeg(image_decoded).eval()
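Note that .eval() only works while a session is active; the conversion script already holds one (the sess passed to read_image_dims), so an explicit variant of the same fix, assuming that sess is in scope, is:
image_data = tf.image.encode_jpeg(image_decoded).eval(session=sess)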
Your GitHub link is dead, so I could not check whether dataset_utils.image_to_tfexample follows the same pattern as below, but from the arguments it looks like it does.
def bytes_feature(values):
    """Returns a TF-Feature of bytes.

    Args:
        values: A string.

    Returns:
        a TF-Feature.
    """
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))

def image_to_tfexample(image_data, image_format, height, width):
    feature = {
        'image/encoded': bytes_feature(image_data.eval()),
        'image/format': bytes_feature(image_format),
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))

Multiple labels with tensorflow

I am trying to modify this code (see GitHub link below), so that I can use my own data and predict more than one label using the same set of features.
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/input_fn/boston.py
I have it working when I use one label at a time. However when I try to create a tensor which contains more than one label, I run into problems. Any suggestions?
My modified LABELS and input_fn look like this:
LABELS = ["Label1", "Label2", "Label3"]

def input_fn(data_set):
    feature_cols = {k: tf.constant(len(data_set), shape=[data_set[k].size, 1]) for k in FEATURES}
    labels_data = []
    for i in range(0, len(data_set)):
        temp = []
        for label in LABELS:
            temp.append(data_set[label].values[i])
        labels_data.append(temp)
    labels = tf.constant(labels_data, shape=[len(data_set), len(LABELS)])
    return feature_cols, labels
This is the end of the error message I get:
File "/usr/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/dnn.py", line 175, in _dnn_model_fn
return head.head_ops(features, labels, mode, _train_op_fn, logits)
File "/usr/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/head.py", line 403, in head_ops
head_name=self.head_name)
File "/usr/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/head.py", line 1358, in _training_loss
loss_fn(logits, labels),
File "/usr/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/head.py", line 330, in _mean_squared_loss
logits.get_shape().assert_is_compatible_with(labels.get_shape())
File "/usr/lib/python2.7/site-packages/tensorflow/python/framework/tensor_shape.py", line 735, in assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (118, 1) and (118, 3) are incompatible
From this link it seems like you have to set the number of classes (labels) to 3:
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
                                          n_classes=3,
                                          hidden_units=[10, 10],
                                          model_dir="/tmp/boston_model")
The n_classes value should perhaps be set to 2 instead of 3; see which one works.
From here, you have to change this parameter:
label_dimension: Dimension of the label for multilabels. Defaults to 1.
So this should work:
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
                                          label_dimension=3,
                                          hidden_units=[10, 10],
                                          model_dir="/tmp/boston_model")
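With label_dimension=3, the regressor's head produces three outputs per sample, so the (118, 3) labels tensor built in input_fn becomes compatible with the logits and the shape error from the traceback should disappear.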

Pygame gives me TypeError: add() argument after * must be a sequence, not Ball when adding Ball to sprite group

Recently, I have been messing around with pygame and I decided to make a pong clone. However, I am running into problems with the ball class.
This is my class:
class Ball(pygame.sprite.Sprite):
    """ This class represents the ball that bounces around. """

    # Constructor function
    def __init__(self, x, y):
        # Call the parent's constructor
        pygame.sprite.Sprite().__init__(self)
        # Set height, width
        self.image = pygame.Surface([15, 15])
        self.image.fill(white)
        # Make our top-left corner the passed-in location.
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x = x
        # Set speed vector
        self.change_x = 0
        self.change_y = 0

    def goal(self):
        if self.rect.x <= SCREEN_WIDTH:
            playerscore =+ 1
            print playerscore
        elif self.rect.x >= 0:
            aiscore =+ 1
            print aiscore

    def update(self):
        """ Update the ball's position. """
        # Get the old position, in case we need to go back to it
        old_x = self.rect.x
        new_x = old_x + self.change_x
        self.rect.x = new_x
        # Did this update cause us to hit a wall?
        collide = pygame.sprite.spritecollide(self, allsprites_list, False)
        if collide:
            # Whoops, hit a wall. Go back to the old position
            self.rect.x = old_x
            self.change_x *= -1

        old_y = self.rect.y
        new_y = old_y + self.change_y
        self.rect.y = new_y
        # Did this update cause us to hit a wall?
        collide = pygame.sprite.spritecollide(self, allsprites_list, False)
        if collide:
            # Whoops, hit a wall. Go back to the old position
            self.rect.y = old_y
            self.change_y *= -1

        if self.rect.x < -20 or self.rect.x > screen_width + 20:
            self.change_x = 0
            self.change_y = 0
This adds the ball to a sprite group:
self.ball = Ball(100, 250)
self.all_sprites_list.add(self.ball)
And this is the traceback:
Traceback (most recent call last):
File "C:/Users/Enigma/Desktop/pong.py", line 312, in <module>
main()
File "C:/Users/Enigma/Desktop/pong.py", line 290, in main
game = Game()
File "C:/Users/Enigma/Desktop/pong.py", line 218, in __init__
self.ball = Ball(100, 250)
File "C:/Users/Enigma/Desktop/pong.py", line 83, in __init__
pygame.sprite.Sprite().__init__(self)
File "C:\Python27\lib\site-packages\pygame\sprite.py", line 114, in __init__
if groups: self.add(groups)
File "C:\Python27\lib\site-packages\pygame\sprite.py", line 129, in add
else: self.add(*group)
File "C:\Python27\lib\site-packages\pygame\sprite.py", line 129, in add
else: self.add(*group)
TypeError: add() argument after * must be a sequence, not Ball
I have searched the web and all of the posts I could find here on SO, but none of them seem to apply to this particular conundrum. Any and all help would be appreciated.
I am running Python 2.7.9 on Windows 7.
This line:
pygame.sprite.Sprite().__init__(self)
is almost certainly wrong. You want to call the method on the class, not an instance.
pygame.sprite.Sprite.__init__(self)
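An equivalent spelling, assuming Ball inherits directly from pygame.sprite.Sprite as in the snippet above, is Python 2's super():
class Ball(pygame.sprite.Sprite):
    def __init__(self, x, y):
        # Same effect as pygame.sprite.Sprite.__init__(self)
        super(Ball, self).__init__()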