When I try to download the dataset with tfds.load(), like this:
ratings = tfds.load('movielens/100k-ratings', split="train")
I get the following error:
Downloading and preparing dataset Unknown size (download: Unknown size, generated: Unknown size, total: Unknown size) to C:\Users\samsung\tensorflow_datasets\movielens\100k-ratings\0.1.0...
Dl Completed...: 0% 0/1 [00:21<?, ? url/s]
Dl Size...: 0/0 [00:21<?, ? MiB/s]
Extraction completed...: 0/0 [00:21<?, ? file/s]
HTTPConnectionPool(host='files.grouplens.org', port=80): Max retries exceeded with url: /datasets/movielens/ml-100k.zip (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x000001E81008F910>: Failed to establish a new connection: [WinError 10060]
By the way, I am using a company computer.
Could someone please help me?
Do you already have the same dataset in the download folder?
The load call itself cannot shuffle the examples for you, but you can apply shuffle, batch, and prefetch to the returned tf.data.Dataset:
import tensorflow as tf
import tensorflow_datasets as tfds

ds = tfds.load('movielens/100k-ratings', split='train', shuffle_files=True)
ds = ds.shuffle(1024).batch(64).prefetch(tf.data.experimental.AUTOTUNE)
assert isinstance(ds, tf.data.Dataset)

for example in ds.take(1):
    print(example)
# {'bucketized_user_age': <tf.Tensor: shape=(), dtype=float32, numpy=45.0>, 'movie_genres': <tf.Tensor: shape=(1,), dtype=int64, numpy=array([7], dtype=int64)>,
# 'movie_id': <tf.Tensor: shape=(), dtype=string, numpy=b'357'>, 'movie_title': <tf.Tensor: shape=(), dtype=string, numpy=b"One Flew Over the Cuckoo's Nest (1975)">,
# 'raw_user_age': <tf.Tensor: shape=(), dtype=float32, numpy=46.0>, 'timestamp': <tf.Tensor: shape=(), dtype=int64, numpy=879024327>,
# 'user_gender': <tf.Tensor: shape=(), dtype=bool, numpy=True>, 'user_id': <tf.Tensor: shape=(), dtype=string, numpy=b'138'>,
# 'user_occupation_label': <tf.Tensor: shape=(), dtype=int64, numpy=4>, 'user_occupation_text': <tf.Tensor: shape=(), dtype=string, numpy=b'doctor'>,
# 'user_rating': <tf.Tensor: shape=(), dtype=float32, numpy=4.0>, 'user_zip_code': <tf.Tensor: shape=(), dtype=string, numpy=b'53211'>}
Below is a simple example:
import os
import psutil
import numpy as np
import tensorflow as tf

process = psutil.Process(os.getpid())

class TestKeras3:
    def __init__(self):
        pass

    def build_graph(self):
        inputs = tf.keras.Input(shape=(None, None, 3), batch_size=1)
        x = tf.keras.layers.Conv2D(100, (2, 2), padding='SAME', name='x')(inputs)
        y = tf.reshape(x, (-1,))
        z = tf.multiply(y, y)
        model = tf.keras.models.Model(inputs=inputs, outputs=z)
        return model

    def train(self):
        model = self.build_graph()
        model.summary()
        size = np.arange(1000)
        for i in range(1000):
            inputs = tf.random.normal([1, size[999 - i], size[999 - i], 3])
            with tf.GradientTape() as tape:
                output = model(inputs)
                print(i, tf.shape(output), process.memory_info().rss)
and the output is:
i    tf.shape(output)    RSS memory (bytes)
979 tf.Tensor([40000], shape=(1,), dtype=int32) 2481123328
980 tf.Tensor([36100], shape=(1,), dtype=int32) 2481582080
981 tf.Tensor([32400], shape=(1,), dtype=int32) 2482122752
982 tf.Tensor([28900], shape=(1,), dtype=int32) 2482393088
983 tf.Tensor([25600], shape=(1,), dtype=int32) 2482933760
984 tf.Tensor([22500], shape=(1,), dtype=int32) 2483453952
985 tf.Tensor([19600], shape=(1,), dtype=int32) 2483793920
986 tf.Tensor([16900], shape=(1,), dtype=int32) 2484330496
987 tf.Tensor([14400], shape=(1,), dtype=int32) 2484871168
988 tf.Tensor([12100], shape=(1,), dtype=int32) 2485137408
989 tf.Tensor([10000], shape=(1,), dtype=int32) 2485665792
990 tf.Tensor([8100], shape=(1,), dtype=int32) 2486206464
991 tf.Tensor([6400], shape=(1,), dtype=int32) 2486579200
992 tf.Tensor([4900], shape=(1,), dtype=int32) 2487119872
993 tf.Tensor([3600], shape=(1,), dtype=int32) 2487390208
994 tf.Tensor([2500], shape=(1,), dtype=int32) 2487930880
995 tf.Tensor([1600], shape=(1,), dtype=int32) 2488463360
996 tf.Tensor([900], shape=(1,), dtype=int32) 2488811520
997 tf.Tensor([400], shape=(1,), dtype=int32) 2489335808
998 tf.Tensor([100], shape=(1,), dtype=int32) 2489868288
999 tf.Tensor([0], shape=(1,), dtype=int32) 2490241024
I found that every time the input size changed, the memory consumption also increased.
The Conv2D kernel in the model has a fixed shape of (2, 2, 3, 100). Does the model cache some tensors during the forward pass, causing memory to keep growing? If so, how can these resources be released during training? If not, what else could be the reason?
After trying many methods, I solved this problem myself.
It seems that using plain tf operations inside a Keras functional graph causes a memory leak; it can be avoided by wrapping those tf ops in a tf.keras.layers.Layer subclass.
class ReshapeMulti(tf.keras.layers.Layer):
    def __init__(self):
        super(ReshapeMulti, self).__init__()

    def call(self, inputs):
        y = tf.reshape(inputs, (-1,))
        z = tf.multiply(y, y)
        return z

class TestKeras3:
    def __init__(self):
        pass

    def build_graph(self):
        inputs = tf.keras.Input(shape=(None, None, 3), batch_size=1)
        x = tf.keras.layers.Conv2D(100, (2, 2), padding='SAME', name='x')(inputs)
        # y = tf.reshape(x, (-1,))
        # z = tf.multiply(y, y)
        z = ReshapeMulti()(x)
        model = tf.keras.models.Model(inputs=inputs, outputs=z)
        return model

    def train(self):
        model = self.build_graph()
        model.summary()
        size = np.arange(1000)
        for i in range(1000):
            inputs = tf.random.normal([1, size[999 - i], size[999 - i], 3])
            with tf.GradientTape() as tape:
                output = model(inputs)
                print(i, tf.shape(output), process.memory_info().rss)
I would like to calculate gradients on tensorflow_probability layers using tf.GradientTape(). This is straightforward with a regular layer, e.g. a Dense layer:
import tensorflow as tf

inp = tf.random.normal((2, 5))
layer = tf.keras.layers.Dense(10)
with tf.GradientTape() as tape:
    out = layer(inp)
    loss = tf.reduce_mean(1 - out)
grads = tape.gradient(loss, layer.trainable_variables)
print(grads)
[<tf.Tensor: shape=(5, 10), dtype=float32, numpy=
array([[ 0.04086879, 0.04086879, -0.02974391, 0.04086879, 0.04086879,
0.04086879, -0.02974391, 0.04086879, -0.02974391, -0.07061271],
[ 0.01167339, 0.01167339, -0.02681615, 0.01167339, 0.01167339,
0.01167339, -0.02681615, 0.01167339, -0.02681615, -0.03848954],
[ 0.00476769, 0.00476769, -0.00492069, 0.00476769, 0.00476769,
0.00476769, -0.00492069, 0.00476769, -0.00492069, -0.00968838],
[-0.00462376, -0.00462376, 0.05914849, -0.00462376, -0.00462376,
-0.00462376, 0.05914849, -0.00462376, 0.05914849, 0.06377225],
[-0.11682947, -0.11682947, -0.06357963, -0.11682947, -0.11682947,
-0.11682947, -0.06357963, -0.11682947, -0.06357963, 0.05324984]],
dtype=float32)>,
<tf.Tensor: shape=(10,), dtype=float32, numpy=
array([-0.05, -0.05, -0.1 , -0.05, -0.05, -0.05, -0.1 , -0.05, -0.1 ,
-0.05], dtype=float32)>]
But if I do the same with DenseReparameterization, the gradients come back as None.
import tensorflow_probability as tfp

inp = tf.random.normal((2, 5))
layer = tfp.layers.DenseReparameterization(10)
with tf.GradientTape() as tape:
    out = layer(inp)
    loss = tf.reduce_mean(1 - out)
grads = tape.gradient(loss, layer.trainable_variables)
print(grads)
[None, None, None]
Can anyone tell me how to fix this so that the gradients are recorded properly?
Aha, that's it! I am using tf v2.1.0. Apparently that does not work well with tensorflow_probability. I will upgrade asap. Thank you gobrewers14.
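For completeness, here is a minimal sketch (my own addition, assuming an upgraded, matching TF / TFP pair) to verify that the gradients register after the upgrade:

import tensorflow as tf
import tensorflow_probability as tfp

print(tf.__version__, tfp.__version__)  # check that the installed TF / TFP versions match

inp = tf.random.normal((2, 5))
layer = tfp.layers.DenseReparameterization(10)
with tf.GradientTape() as tape:
    out = layer(inp)
    loss = tf.reduce_mean(1 - out)
grads = tape.gradient(loss, layer.trainable_variables)
print(grads)  # expected: a list of tensors instead of [None, None, None]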
When I try to get a model from the tensorflow-hub repository,
I can see it is in the SavedModel format, but I can't access the model architecture or the weights stored for each layer.
import tensorflow_hub as hub

model = hub.load("https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1")
Is there any formal way to work with it?
The attributes I can get through model.__dict__ do not clearly map to any specific layer of the original model:
{'_self_setattr_tracking': True,
'_self_unconditional_checkpoint_dependencies': [TrackableReference(name='_model', ref=<tensorflow.python.saved_model.load.Loader._recreate_base_user_object.<locals>._UserObject object at 0x7fe4e4914710>),
TrackableReference(name='signatures', ref=_SignatureMap({'serving_default': <ConcreteFunction signature_wrapper(input_tensor) at 0x7FE4E601F210>})),
TrackableReference(name='_self_saveable_object_factories', ref=DictWrapper({}))],
'_self_unconditional_dependency_names': {'_model': <tensorflow.python.saved_model.load.Loader._recreate_base_user_object.<locals>._UserObject at 0x7fe4e4914710>,
'signatures': _SignatureMap({'serving_default': <ConcreteFunction signature_wrapper(input_tensor) at 0x7FE4E601F210>}),
'_self_saveable_object_factories': {}},
'_self_unconditional_deferred_dependencies': {},
'_self_update_uid': 176794,
'_self_name_based_restores': set(),
'_self_saveable_object_factories': {},
'_model': <tensorflow.python.saved_model.load.Loader._recreate_base_user_object.<locals>._UserObject at 0x7fe4e4914710>,
'signatures': _SignatureMap({'serving_default': <ConcreteFunction signature_wrapper(input_tensor) at 0x7FE4E601F210>}),
'__call__': <tensorflow.python.saved_model.function_deserialization.RestoredFunction at 0x7fe315a28950>,
'graph_debug_info': ,
'tensorflow_version': '2.4.0',
'tensorflow_git_version': 'unknown'}
I have also tried model.signatures['serving_default'].__dict__, but the tensors representing each layer are not visible:
[<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>,
<tf.Tensor: shape=(), dtype=resource, numpy=<unprintable>>],
With the CLI tool saved_model_cli, provided by the tensorflow-serving-api package, it is possible to inspect a saved model. As a first step I downloaded and cached the model:
from os import environ
import tensorflow_hub as hub
environ['TFHUB_CACHE_DIR'] = '/Users/you/.cache/tfhub_modules'
hub.load("https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1")
Then I inspected the signatures and layers:
saved_model_cli show --dir /Users/you/.cache/tfhub_modules/3085eb2fbe2ad0b69801d50844c97b7a7a5ecade --all
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:

signature_def['__saved_model_init_op']:
  The given SavedModel SignatureDef contains the following input(s):
  The given SavedModel SignatureDef contains the following output(s):
    outputs['__saved_model_init_op'] tensor_info:
        dtype: DT_INVALID
        shape: unknown_rank
        name: NoOp
  Method name is:

signature_def['serving_default']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['input_tensor'] tensor_info:
        dtype: DT_UINT8
        shape: (1, -1, -1, 3)
        name: serving_default_input_tensor:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['detection_boxes'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 100, 4)
        name: StatefulPartitionedCall:0
    outputs['detection_classes'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 100)
        name: StatefulPartitionedCall:1
    outputs['detection_scores'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 100)
        name: StatefulPartitionedCall:2
    outputs['num_detections'] tensor_info:
        dtype: DT_FLOAT
        shape: (1)
        name: StatefulPartitionedCall:3
  Method name is: tensorflow/serving/predict
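(As an aside, not part of the original workflow: based on the signature above — a uint8 image batch of shape (1, -1, -1, 3) in, detection tensors out — a minimal sketch of calling the model directly could look like this.)

import tensorflow as tf
import tensorflow_hub as hub

model = hub.load("https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1")
detector = model.signatures['serving_default']

# Dummy uint8 RGB image; height and width are dynamic according to the signature.
image = tf.zeros([1, 512, 512, 3], dtype=tf.uint8)
outputs = detector(input_tensor=image)
print(outputs['detection_boxes'].shape)   # (1, 100, 4)
print(outputs['detection_scores'].shape)  # (1, 100)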
After that I used the debugger to understand how a SavedModel works internally and found the member fields variables and trainable_variables on model.signatures['serving_default'], which hold the model's data (weights, etc.).
Short summary of the answer: we can access the variables of the model through model.signatures['serving_default'].variables.
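As a rough sketch of what that looks like in practice (my own illustration, using only the attribute names reported above):

import tensorflow_hub as hub

model = hub.load("https://tfhub.dev/tensorflow/centernet/hourglass_512x512/1")
sig = model.signatures['serving_default']

print(len(sig.variables))             # number of restored variables
for v in sig.variables[:5]:           # peek at the first few
    print(v.name, v.shape, v.dtype)
print(len(sig.trainable_variables))   # the trainable subset, if any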
Can't figure out what to use instead of Iterator
I tried tf.compat.v1.data.Iterator instead but got another error - AttributeError: 'PrefetchDataset' object has no attribute 'output_types'
code:
train_ds = prepare_for_train(labeled_ds)
val_ds = tf.data.Dataset.from_tensor_slices(test_data)
# create an iterator with the right shape and type
iter = tf.data.Iterator.from_structure(train_ds.output_types, train_ds.output_shapes)
"""iter= tf.compat.v1.data.Iterator.from_structure(train_ds.output_types, train_ds.output_shapes)"""
print(iter)
*AttributeError: module 'tensorflow_core._api.v2.data' has no attribute 'Iterator'*
My TF version is 2.2.0-dev20200212.
Thank you!
I was able to reproduce your error. Here is how you can fix it in TensorFlow 2.x.
You need to define iter as below:
iter = tf.compat.v1.data.Iterator.from_structure(tf.compat.v1.data.get_output_types(train_dataset),
                                                 tf.compat.v1.data.get_output_shapes(train_dataset))
Below is an example -
Code -
%tensorflow_version 2.x
import tensorflow as tf
print(tf.__version__)
import numpy as np

# Reinitializable iterator to switch between Datasets
EPOCHS = 10

# making fake data using numpy
train_data = (np.random.sample((100, 2)), np.random.sample((100, 1)))

# create two datasets, one for training and one for test
train_dataset = tf.data.Dataset.from_tensor_slices(train_data)

# create an iterator of the correct shape and type
iter = tf.compat.v1.data.Iterator.from_structure(tf.compat.v1.data.get_output_types(train_dataset),
                                                 tf.compat.v1.data.get_output_shapes(train_dataset))

# create the initialisation operations
train_init_op = iter.make_initializer(train_dataset)

features, labels = iter.get_next()

for _ in range(EPOCHS):
    print([features, labels])
Output -
2.1.0
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/iterator_ops.py:347: Iterator.output_types (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.compat.v1.data.get_output_types(iterator)`.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/iterator_ops.py:348: Iterator.output_shapes (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.compat.v1.data.get_output_shapes(iterator)`.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/data/ops/iterator_ops.py:350: Iterator.output_classes (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.compat.v1.data.get_output_classes(iterator)`.
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
[<tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.35431711, 0.07564416])>, <tf.Tensor: shape=(1,), dtype=float64, numpy=array([0.38728039])>]
Hope this answers your question. Happy Learning.
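As a side note (my own addition, not part of the original answer): in eager TF 2.x you can usually skip the Iterator object entirely and iterate over the dataset directly, for example:

import numpy as np
import tensorflow as tf

train_data = (np.random.sample((100, 2)), np.random.sample((100, 1)))
train_dataset = tf.data.Dataset.from_tensor_slices(train_data).batch(10)

# tf.data.Dataset objects are directly iterable in eager mode.
for features, labels in train_dataset.take(2):
    print(features.shape, labels.shape)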
tf.compat.v1.disable_eager_execution()

train_ds = prepare_for_train(labeled_ds)
val_ds = tf.data.Dataset.from_tensor_slices(test_data)

# create an iterator with the right shape and type
iter = tf.compat.v1.data.Iterator.from_structure(train_ds.output_types, train_ds.output_shapes)
print(iter)

Using this should solve the *AttributeError: module 'tensorflow_core._api.v2.data' has no attribute 'Iterator'* issue.
I use tensorflow eager execution to do the following calculation:
y = x^2
z = y + 2.
My goal is to calculate dz/dx and dz/dy (the gradients of z with respect to x and y); by the chain rule I would expect dz/dy = 1 and dz/dx = 2x.
dx, dy = GradientTape.gradient(z, [x, y]).
However, only dy is computed and dx comes back as None, i.e. only the gradients of tensors that z directly depends on are calculated.
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
The following is the full code.
from __future__ import absolute_import, division, print_function
import tensorflow as tf

tf.enable_eager_execution()
tfe = tf.contrib.eager

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "6"

import warnings
warnings.filterwarnings('ignore')

train_steps = 5
for i in range(train_steps):
    x = tf.contrib.eager.Variable(0.)
    with tf.GradientTape() as tape:
        y = tf.square(x)
        z = y + 2
    print(tape.gradient(z, [x, y]))
Any solution?