I have created a function which adds all my images to a list.
The function is as follows:
def load_data(train_path, test_path):
    X_train = []
    X_test = []
    for i in os.listdir(train_path):
        X_train.append(i)
    for j in os.listdir(test_path):
        X_test.append(j)
    return X_train, X_test
When I try to display an image by indexing into the list, e.g. X_train[10], I get a FileNotFoundError.
img=mpimg.imread(X_train[10])
imgplot = plt.imshow(img)
plt.show()
The error is as follows:
FileNotFoundError Traceback (most recent call last)
<ipython-input-7-869e21232029> in <module>()
----> 1 img=mpimg.imread(X_train[10])
2 imgplot = plt.imshow(img)
3 plt.show()
/Users/ViditShah/anaconda/envs/dl/lib/python3.6/site-packages/matplotlib/image.py in imread(fname, format)
1295 return handler(fd)
1296 else:
-> 1297 with open(fname, 'rb') as fd:
1298 return handler(fd)
1299 else:
FileNotFoundError: [Errno 2] No such file or directory: 'scan_0001001.png'
os.listdir() only returns the file names, not the full paths.
You need to store the full file path in your list:
X_train.append(os.path.join(train_path, i))
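For reference, a minimal sketch of the corrected function (same behaviour as the original, just storing full paths):

import os

def load_data(train_path, test_path):
    # Store the full path of each file so it can be opened later,
    # regardless of the current working directory
    X_train = [os.path.join(train_path, name) for name in os.listdir(train_path)]
    X_test = [os.path.join(test_path, name) for name in os.listdir(test_path)]
    return X_train, X_test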
I want to use my own images with this TensorFlow Style Transfer demo, which I've copied to my own Colab notebook.
https://www.tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization
I have images stored in a GCS bucket but have been getting image format errors. To test this, I took one of the images from the TensorFlow demo, downloaded it, put it in my GCS bucket, added that link to the "Let's try it on more images" section of my notebook, and got the same file format error I was previously getting with my own images.
Here's where I've inserted the GCS version of the image:
content_urls = dict(
    tueblingen02='https://storage.cloud.google.com/01_bucket-02/Tuebingen_Neckarfront-vox.jpeg',
    sea_turtle='https://upload.wikimedia.org/wikipedia/commons/d/d7/Green_Sea_Turtle_grazing_seagrass.jpg',
    tuebingen='https://upload.wikimedia.org/wikipedia/commons/0/00/Tuebingen_Neckarfront.jpg',
    grace_hopper='https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg',
)
style_urls = dict(
    kanagawa_great_wave='https://upload.wikimedia.org/wikipedia/commons/0/0a/The_Great_Wave_off_Kanagawa.jpg',
    kandinsky_composition_7='https://upload.wikimedia.org/wikipedia/commons/b/b4/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg',
    etc ...
The resulting error message:
InvalidArgumentError: Unknown image file format. One of JPEG, PNG, GIF, BMP required. [Op:DecodeImage]
Full message:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-16-3ded16359898> in <module>()
26 content_image_size = 384
27 style_image_size = 256
---> 28 content_images = {k: load_image(v, (content_image_size, content_image_size)) for k, v in content_urls.items()}
29 style_images = {k: load_image(v, (style_image_size, style_image_size)) for k, v in style_urls.items()}
30 style_images = {k: tf.nn.avg_pool(style_image, ksize=[3,3], strides=[1,1], padding='SAME') for k, style_image in style_images.items()}
3 frames
<ipython-input-16-3ded16359898> in <dictcomp>(.0)
26 content_image_size = 384
27 style_image_size = 256
---> 28 content_images = {k: load_image(v, (content_image_size, content_image_size)) for k, v in content_urls.items()}
29 style_images = {k: load_image(v, (style_image_size, style_image_size)) for k, v in style_urls.items()}
30 style_images = {k: tf.nn.avg_pool(style_image, ksize=[3,3], strides=[1,1], padding='SAME') for k, style_image in style_images.items()}
<ipython-input-2-1485a3082999> in load_image(image_url, image_size, preserve_aspect_ratio)
19 img = tf.io.decode_image(
20 tf.io.read_file(image_path),
---> 21 channels=3, dtype=tf.float32)[tf.newaxis, ...]
22 img = crop_center(img)
23 img = tf.image.resize(img, image_size, preserve_aspect_ratio=True)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
7184 def raise_from_not_ok_status(e, name):
7185 e.message += (" name: " + name if name is not None else "")
-> 7186 raise core._status_to_exception(e) from None # pylint: disable=protected-access
7187
7188
InvalidArgumentError: Unknown image file format. One of JPEG, PNG, GIF, BMP required. [Op:DecodeImage]
So that's confusing me because the image works fine when it's hosted elsewhere, leading me to believe it's not an image format issue, but something else.
I'd greatly appreciate any input or suggestions on what might be happening here.
Thanks.
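One way to check what that URL is actually serving is to fetch it directly and inspect the response. This is a minimal diagnostic sketch, not part of the original notebook; it assumes the requests library is available in the Colab environment:

import requests

url = 'https://storage.cloud.google.com/01_bucket-02/Tuebingen_Neckarfront-vox.jpeg'
resp = requests.get(url)

# If the Content-Type is text/html rather than image/jpeg, the URL is returning
# a web page (for example a sign-in or viewer page) instead of the raw image
# bytes, which would explain the "Unknown image file format" error.
print(resp.status_code, resp.headers.get('Content-Type'))
print(resp.content[:20])  # JPEG files start with the bytes b'\xff\xd8\xff'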
I've been trying to transform all my logs into dicts using the xmltodict.parse function.
The thing is, when I convert a single row and assign it to a variable, it works fine:
a = xmltodict.parse(df['CONFIG'][0])
The same goes for:
parsed[1] = xmltodict.parse(df['CONFIG'][1])
But when I try to iterate over the entire DataFrame and store the results in a dictionary, I get the following:
for ind in df['CONFIG'].index:
    parsed[ind] = xmltodict.parse(df['CONFIG'][ind])
---------------------------------------------------------------------------
ExpatError Traceback (most recent call last)
/tmp/ipykernel_31/1871123186.py in <module>
1 for ind in df['CONFIG'].index:
----> 2 parsed[ind] = xmltodict.parse(df['CONFIG'][ind])
/opt/conda/lib/python3.9/site-packages/xmltodict.py in parse(xml_input, encoding, expat, process_namespaces, namespace_separator, disable_entities, **kwargs)
325 parser.ParseFile(xml_input)
326 else:
--> 327 parser.Parse(xml_input, True)
328 return handler.item
329
ExpatError: syntax error: line 1, column 0
Can you try this?
for ind in range(len(df['CONFIG'])):
    parsed[ind] = xmltodict.parse(df['CONFIG'][ind])
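If the error persists, the loop is most likely hitting a row whose CONFIG value is not valid XML (empty, NaN, or truncated). A minimal sketch, assuming df is a pandas DataFrame, that skips such rows and reports them:

import xmltodict
from xml.parsers.expat import ExpatError

parsed = {}
bad_rows = []
for ind in df['CONFIG'].index:
    try:
        parsed[ind] = xmltodict.parse(df['CONFIG'][ind])
    except (ExpatError, TypeError) as exc:
        # ExpatError covers malformed XML; TypeError covers non-string values such as NaN
        bad_rows.append((ind, exc))

print(f'{len(bad_rows)} rows failed to parse')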
I followed the template and changed the link, but it doesn't work.
https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb#scrollTo=3jz5x0JoskPv
This is my dataset:
https://firebasestorage.googleapis.com/v0/b/lol-fypproject.appspot.com/o/lol.tgz?alt=media&token=d07b81bd-442f-4ebe-920e-3772598fbb20
Original code:
image_path = tf.keras.utils.get_file(
    'flower_photos.tgz',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    extract=True)
image_path = os.path.join(os.path.dirname(image_path), 'flower_photos')
I changed it to this:
image_path = tf.keras.utils.get_file(
    'lol.tgz',
    'https://firebasestorage.googleapis.com/v0/b/lol-fypproject.appspot.com/o/lol.tgz?alt=media&token=d07b81bd-442f-4ebe-920e-3772598fbb20',
    extract=True)
image_path = os.path.join(os.path.dirname(image_path), 'lol')
The failing line and the error message are shown below:
data = ImageClassifierDataLoader.from_folder(image_path)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-15-a5e7646aca55> in <module>()
----> 1 data = ImageClassifierDataLoader.from_folder(image_path)
2 train_data, test_data = data.split(0.9)
/usr/local/lib/python3.7/dist-packages/tensorflow_examples/lite/model_maker/core/data_util/image_dataloader.py in from_folder(cls, filename, shuffle)
69 all_image_size = len(all_image_paths)
70 if all_image_size == 0:
---> 71 raise ValueError('Image size is zero')
72
73 if shuffle:
ValueError: Image size is zero
I have found the problem:
the folder structure inside my archive is not the same as in the sample.
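For reference, from_folder expects one subdirectory per class, each containing that class's images, in the same way the sample flower_photos archive is laid out (the class and file names below are just placeholders):

lol/
    class_a/
        image_001.jpg
        image_002.jpg
    class_b/
        image_101.jpg
        ...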
I am trying to implement Unsupervised Aspect Extraction using the code available here.
Link to the paper
While implementing the Attention class in my_layers.py, I am getting an error in the call function at the line
y = K.repeat_elements(y, self.steps, axis=1)
Complete code of the function is given below:
def call(self, input_tensor, mask=None):
    x = input_tensor[0]
    y = input_tensor[1]
    mask = mask[0]

    y = K.transpose(K.dot(self.W, K.transpose(y)))
    y = K.expand_dims(y, axis=-2)
    y = K.repeat_elements(y, self.steps, axis=1)
    eij = K.sum(x*y, axis=-1)

    if self.bias:
        b = K.repeat_elements(self.b, self.steps, axis=0)
        eij += b

    eij = K.tanh(eij)
    a = K.exp(eij)

    if mask is not None:
        a *= K.cast(mask, K.floatx())

    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    return a
The error is as follows
Traceback (most recent call last):
File "", line 1, in
model = create_model(ortho_reg, neg_size, emb_dim, aspect_size, emb_path, maxlen, vocab)
File "/home/fractaluser/Projects/workspace/UnsupervisedAspectExtraction/code/model.py", line 32, in create_model
att_weights = Attention(name='att_weights')([e_w, y_s])
File "/home/fractaluser/anaconda3/envs/venv_keras/lib/python3.5/site-packages/keras/engine/base_layer.py", line 457, in call
output = self.call(inputs, **kwargs)
File "/home/fractaluser/Projects/workspace/UnsupervisedAspectExtraction/code/my_layers.py", line 58, in call
y = K.repeat_elements(y, self.steps, axis=1)
File "/home/fractaluser/anaconda3/envs/venv_keras/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py", line 2093, in repeat_elements
return concatenate(x_rep, axis)
File "/home/fractaluser/anaconda3/envs/venv_keras/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py", line 1954, in concatenate
return tf.sparse_concat(axis, tensors)
File "/home/fractaluser/.local/lib/python3.5/site-packages/tensorflow/python/util/deprecation.py", line 488, in new_func
return func(*args, **kwargs)
File "/home/fractaluser/.local/lib/python3.5/site-packages/tensorflow/python/ops/sparse_ops.py", line 316, in sparse_concat
gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))
File "/home/fractaluser/.local/lib/python3.5/site-packages/tensorflow/python/ops/gen_sparse_ops.py", line 911, in sparse_concat
concat_dim=concat_dim, name=name)
File "/home/fractaluser/.local/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py", line 570, in _apply_op_helper
(input_name, op_type_name, len(values), num_attr.minimum))
ValueError: List argument 'indices' to 'SparseConcat' Op with length 0 shorter than minimum length 2.
I could not find any solution on the internet. Please help.
I used to have this problem:
AttributeError: module 'keras.backend' has no attribute 'image_dim_ordering'
So I had to modify every check of the form
K.image_dim_ordering() == 'th' (or 'tf') ==> K.image_data_format() == 'channels_first' (or 'channels_last')
After that, I ran into the same problem as you, because there were still places I had not corrected. Once I had changed all of them, the problem was gone.
I hope this helps you.
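A minimal sketch of the substitution described above (the condition shown is illustrative and not taken verbatim from the repository):

from keras import backend as K

# Old Keras 1.x style check; on newer Keras this raises
# AttributeError: module 'keras.backend' has no attribute 'image_dim_ordering'
#     if K.image_dim_ordering() == 'th':
#         ...

# Replacement that works on newer Keras backends:
if K.image_data_format() == 'channels_first':
    pass  # former 'th' branch; use 'channels_last' where the code checked for 'tf'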
I want to store a pickle file on S3, as a result of a luigi Task. Below is the class that defines the Task:
class CreateItemVocabulariesTask(luigi.Task):
    def __init__(self):
        self.client = S3Client(AwsConfig().aws_access_key_id,
                               AwsConfig().aws_secret_access_key)
        super().__init__()

    def requires(self):
        return [GetItem2VecDataTask()]

    def run(self):
        filename = 'item2vec_results.tsv'
        data = self.client.get('s3://{}/item2vec_results.tsv'.format(AwsConfig().item2vec_path),
                               filename)
        df = pd.read_csv(filename, sep='\t', encoding='latin1')
        unique_users = df['CustomerId'].unique()
        unique_items = df['ProductNumber'].unique()
        item_to_int, int_to_item = utils.create_lookup_tables(unique_items)
        user_to_int, int_to_user = utils.create_lookup_tables(unique_users)
        with self.output()[0].open('wb') as out_file:
            pickle.dump(item_to_int, out_file)
        with self.output()[1].open('wb') as out_file:
            pickle.dump(int_to_item, out_file)
        with self.output()[2].open('wb') as out_file:
            pickle.dump(user_to_int, out_file)
        with self.output()[3].open('wb') as out_file:
            pickle.dump(int_to_user, out_file)

    def output(self):
        files = [S3Target('s3://{}/item2int.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/int2item.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/user2int.pkl'.format(AwsConfig().item2vec_path), client=self.client),
                 S3Target('s3://{}/int2user.pkl'.format(AwsConfig().item2vec_path), client=self.client)]
        return files
When I run this task I get the error ValueError: Unsupported open mode 'wb'. The items I try to dump into a pickle file are just Python dictionaries.
Full traceback:
Traceback (most recent call last):
File "C:\Anaconda3\lib\site-packages\luigi\worker.py", line 203, in run
new_deps = self._run_get_new_deps()
File "C:\Anaconda3\lib\site-packages\luigi\worker.py", line 140, in _run_get_new_deps
task_gen = self.task.run()
File "C:\Users\user\Documents\python workspace\pipeline.py", line 60, in run
with self.output()[0].open('wb') as out_file:
File "C:\Anaconda3\lib\site-packages\luigi\contrib\s3.py", line 714, in open
raise ValueError("Unsupported open mode '%s'" % mode)
ValueError: Unsupported open mode 'wb'
This is an issue that only happens on Python 3.x, as explained here. In order to write a binary file or target under Python 3 (i.e. using 'wb' mode), just set the format parameter of S3Target to Nop, like this:
S3Target('s3://path/to/file', client=self.client, format=luigi.format.Nop)
Note that this is just a trick and is neither intuitive nor well documented.
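Applied to the task above, the output() method would look roughly like this (a sketch based on the code in the question, not a tested implementation):

def output(self):
    # format=luigi.format.Nop allows the target to be opened in binary mode ('wb'),
    # which pickle.dump requires under Python 3.
    path = AwsConfig().item2vec_path
    return [S3Target('s3://{}/item2int.pkl'.format(path), client=self.client, format=luigi.format.Nop),
            S3Target('s3://{}/int2item.pkl'.format(path), client=self.client, format=luigi.format.Nop),
            S3Target('s3://{}/user2int.pkl'.format(path), client=self.client, format=luigi.format.Nop),
            S3Target('s3://{}/int2user.pkl'.format(path), client=self.client, format=luigi.format.Nop)]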