TensorFlow: sample of dataset gets shape None in map

I have a dataset of spectrograms (images) of shape (128, 128, 1) and I want to do data augmentation on it. But when I run the following,
def augment(stft, label):
    print(stft.shape)
    stft = tf.image.random_brightness(stft, 0.2)
    print(stft.shape)
    stft = tf.keras.preprocessing.image.random_shift(
        x=stft, wrg=0.1, hrg=0.1,
        row_axis=0, col_axis=1, channel_axis=2, fill_mode='wrap')
    return stft, label
val_ds = (
    val_ds.map(augment, num_parallel_calls=config.AUTOTUNE)
          .prefetch(config.AUTOTUNE))
I get the following output and error:
(None, 128, 1)
(None, 128, 1)
Traceback (most recent call last):
File "train.py", line 132, in <module>
val_ds.map(augment, num_parallel_calls=config.AUTOTUNE).prefetch(config.AUTOTUNE))
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 1623, in map
return ParallelMapDataset(
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 4016, in __init__
self._map_func = StructuredFunctionWrapper(
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 3221, in __init__
self._function = wrapper_fn.get_concrete_function()
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/eager/function.py", line 2531, in get_concrete_function
graph_function = self._get_concrete_function_garbage_collected(
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/eager/function.py", line 2496, in _get_concrete_function_garbage_collected
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/eager/function.py", line 2777, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/eager/function.py", line 2657, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 981, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 3214, in wrapper_fn
ret = _wrapper_helper(*args)
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 3156, in _wrapper_helper
ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
File "/usr/local/lib64/python3.8/site-packages/tensorflow/python/autograph/impl/api.py", line 265, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in user code:
train.py:128 augment *
stft = tf.keras.preprocessing.image.random_shift(x=stft, wrg=0.1, hrg=0.1, row_axis=0, col_axis=1, channel_axis=2, fill_mode='wrap')
/usr/local/lib64/python3.8/site-packages/keras_preprocessing/image/affine_transformations.py:85 random_shift *
tx = np.random.uniform(-hrg, hrg) * h
TypeError: unsupported operand type(s) for *: 'float' and 'NoneType'
Why is the shape of the tensor passed to the mapped function (None, 128, 1), and how can I fix this?
Thanks.

You need to specify a batch size. Add

val_ds = val_ds.batch(32)

before your map.
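A minimal sketch of the suggested pipeline, assuming config.AUTOTUNE is tf.data.AUTOTUNE (note that after batch() the mapped function receives 4-D tensors, so the per-image axis arguments in augment would shift by one):

import tensorflow as tf

AUTOTUNE = tf.data.AUTOTUNE  # assumption: config.AUTOTUNE holds this value

val_ds = (
    val_ds
    .batch(32)                                  # gives elements an explicit batch dimension
    .map(augment, num_parallel_calls=AUTOTUNE)
    .prefetch(AUTOTUNE)
)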

Related

Visualize proposal regions from RPN head in Faster R-CNN with Tensorflow Object Detection API

I'm trying to debug my trained Faster R-CNN model using the Tensorflow Object Detection API, and I want to visualize the proposal regions of the RPN on an image. Can anyone tell me how to do it?
I found a post here but it hasn't been answered. I tried to export the model using exporter_main_v2.py with only the RPN head, as suggested there, and this is the message I got when I deleted the second_stage:
Traceback (most recent call last):
File "exporter_main_v2.py", line 165, in <module>
app.run(main)
File "E:\Anaconda\envs\TFOD\lib\site-packages\absl\app.py", line 312, in run
_run_main(main, args)
File "E:\Anaconda\envs\TFOD\lib\site-packages\absl\app.py", line 258, in _run_main
sys.exit(main(argv))
File "exporter_main_v2.py", line 158, in main
exporter_lib_v2.export_inference_graph(
File "E:\Anaconda\envs\TFOD\lib\site-packages\object_detection\exporter_lib_v2.py", line 245, in export_inference_graph
detection_model = INPUT_BUILDER_UTIL_MAP['model_build'](
File "E:\Anaconda\envs\TFOD\lib\site-packages\object_detection\builders\model_builder.py", line 1226, in build
return build_func(getattr(model_config, meta_architecture), is_training,
File "E:\Anaconda\envs\TFOD\lib\site-packages\object_detection\builders\model_builder.py", line 665, in _build_faster_rcnn_model
second_stage_box_predictor = box_predictor_builder.build_keras(
File "E:\Anaconda\envs\TFOD\lib\site-packages\object_detection\builders\box_predictor_builder.py", line 991, in build_keras
raise ValueError(
ValueError: Unknown box predictor for Keras: None
I tried again to export the model without deleting the second_stage, and this is the message I got:
INFO:tensorflow:depth of additional conv before box predictor: 0
I0802 20:55:13.930429 1996 convolutional_keras_box_predictor.py:153] depth of additional conv before box predictor: 0
Traceback (most recent call last):
File "exporter_main_v2.py", line 165, in <module>
app.run(main)
File "E:\Anaconda\envs\TFOD\lib\site-packages\absl\app.py", line 312, in run
_run_main(main, args)
File "E:\Anaconda\envs\TFOD\lib\site-packages\absl\app.py", line 258, in _run_main
sys.exit(main(argv))
File "exporter_main_v2.py", line 158, in main
exporter_lib_v2.export_inference_graph(
File "E:\Anaconda\envs\TFOD\lib\site-packages\object_detection\exporter_lib_v2.py", line 271, in export_inference_graph
concrete_function = detection_module.__call__.get_concrete_function()
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\eager\def_function.py", line 1299, in get_concrete_function
concrete = self._get_concrete_function_garbage_collected(*args, **kwargs)
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\eager\def_function.py", line 1205, in _get_concrete_function_garbage_collected
self._initialize(args, kwargs, add_initializers_to=initializers)
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "E:\Anaconda\envs\TFOD\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
tensorflow.python.autograph.pyct.error_utils.KeyError: in user code:
E:\Anaconda\envs\TFOD\lib\site-packages\object_detection\exporter_lib_v2.py:163 call_func *
return self._run_inference_on_images(images, true_shapes, **kwargs)
E:\Anaconda\envs\TFOD\lib\site-packages\object_detection\exporter_lib_v2.py:129 _run_inference_on_images *
detections[classes_field] = (
KeyError: 'detection_classes'
Found the solution!
In the config file, add number_of_stages: 1, as in the fragment below.
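The line sits under the faster_rcnn block of the pipeline config (protobuf text format; the surrounding fields stay as they are):

model {
  faster_rcnn {
    number_of_stages: 1
    # (rest of the faster_rcnn config unchanged)
  }
}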
Instead of using exporter_main_v2.py, I wrote code that builds the model from the checkpoint file:
import os
import tensorflow as tf
from object_detection.builders import model_builder
from object_detection.utils import config_util

# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(path_to_config)
model_config = configs['model']
detection_model = model_builder.build(model_config=model_config, is_training=False)

# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(path_to_ckpt, 'ckpt-0')).expect_partial()
Then I feed the image I need to inspect to the model and use object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array to inspect the boxes; a sketch of that step follows.
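A hedged sketch of that inference step, assuming image_np is the HxWx3 uint8 array to inspect and that the stage-1 model reports its proposals under 'detection_boxes' and 'detection_scores' (RPN proposals carry no classes, hence the placeholder class ids and skip_labels=True):

import numpy as np
from object_detection.utils import visualization_utils as viz_utils

input_tensor = tf.convert_to_tensor(image_np[np.newaxis, ...], dtype=tf.float32)

# Standard TF OD API inference path: preprocess -> predict -> postprocess
preprocessed, shapes = detection_model.preprocess(input_tensor)
prediction_dict = detection_model.predict(preprocessed, shapes)
detections = detection_model.postprocess(prediction_dict, shapes)

boxes = detections['detection_boxes'][0].numpy()
scores = detections['detection_scores'][0].numpy()
classes = np.ones(boxes.shape[0], dtype=np.int32)  # placeholders: proposals are class-agnostic

viz_utils.visualize_boxes_and_labels_on_image_array(
    image_np,
    boxes,
    classes,
    scores,
    category_index={},               # no label map needed for raw proposals
    use_normalized_coordinates=True,
    skip_labels=True,
    max_boxes_to_draw=100,
    min_score_thresh=0.5)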

Reimplementing bert-style pooler throws shape error as if length-dimension were still needed

I have trained an off-the-shelf Transformer().
Now I want to use the encoder in order to build a classifier. For that, I want to use only the first token's output (bert-style cls-token result) and run it through a dense layer.
What I do:
tl.Serial(
    encoder,
    tl.Fn('pooler', lambda x: x[:, 0, :]),
    tl.Dense(7),
)
Shapes: the encoder gives me shape (64, 50, 512), with 64 = batch_size, 50 = seq_len, 512 = model_dim.
The pooler gives me shape (64, 512), which is as expected and desired.
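A quick shape check of the pooler slice in plain jax.numpy confirms this:

import jax.numpy as jnp

x = jnp.zeros((64, 50, 512))   # (batch_size, seq_len, model_dim)
pooled = x[:, 0, :]            # bert-style: keep only the first token
print(pooled.shape)            # (64, 512)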
The dense layer is supposed to take the 512 dimensions for each batch member and classify over 7 classes. But trax/jax still seems to expect this to have length seq_len (50):
TypeError: dot_general requires contracting dimensions to have the same shape, got [512] and [50].
What am I missing?
Full traceback:
Traceback (most recent call last):
File "mikado_classes.py", line 2054, in <module>
app.run(main)
File "/root/.local/lib/python3.7/site-packages/absl/app.py", line 300, in run
_run_main(main, args)
File "/root/.local/lib/python3.7/site-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "mikado_classes.py", line 1153, in main
loop_neu.run(2)
File "/root/.local/lib/python3.7/site-packages/trax/supervised/training.py", line 361, in run
loss, optimizer_metrics = self._run_one_step(task_index, task_changed)
File "/root/.local/lib/python3.7/site-packages/trax/supervised/training.py", line 483, in _run_one_step
batch, rng, step=step, learning_rate=learning_rate
File "/root/.local/lib/python3.7/site-packages/trax/optimizers/trainer.py", line 134, in one_step
(weights, self._slots), step, self._opt_params, batch, state, rng)
File "/root/.local/lib/python3.7/site-packages/trax/optimizers/trainer.py", line 173, in single_device_update_fn
batch, weights, state, rng)
File "/root/.local/lib/python3.7/site-packages/trax/layers/base.py", line 549, in pure_fn
self._caller, signature(x), trace) from None
jax._src.traceback_util.FilteredStackTrace: trax.layers.base.LayerError: Exception passing through layer Serial (in pure_fn):
layer created in file [...]/trax/supervised/training.py, line 865
layer input shapes: (ShapeDtype{shape:(64, 50), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32})
File [...]/trax/layers/combinators.py, line 88, in forward
outputs, s = layer.pure_fn(inputs, w, s, rng, use_cache=True)
LayerError: Exception passing through layer Serial (in pure_fn):
layer created in file [...]/mikado_classes.py, line 1134
layer input shapes: (ShapeDtype{shape:(64, 50), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32})
File [...]/trax/layers/combinators.py, line 88, in forward
outputs, s = layer.pure_fn(inputs, w, s, rng, use_cache=True)
LayerError: Exception passing through layer Dense_7 (in pure_fn):
layer created in file [...]/mikado_classes.py, line 1133
layer input shapes: ShapeDtype{shape:(64, 512), dtype:float32}
File [...]/trax/layers/assert_shape.py, line 122, in forward_wrapper
y = forward(self, x, *args, **kwargs)
File [...]/trax/layers/core.py, line 95, in forward
return jnp.dot(x, w) + b # Affine map.
File [...]/_src/numpy/lax_numpy.py, line 3498, in dot
return lax.dot_general(a, b, (contract_dims, batch_dims), precision)
File [...]/_src/lax/lax.py, line 674, in dot_general
preferred_element_type=preferred_element_type)
File [...]/site-packages/jax/core.py, line 282, in bind
out = top_trace.process_primitive(self, tracers, params)
File [...]/jax/interpreters/ad.py, line 285, in process_primitive
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
File [...]/jax/interpreters/ad.py, line 458, in standard_jvp
val_out = primitive.bind(*primals, **params)
File [...]/site-packages/jax/core.py, line 282, in bind
out = top_trace.process_primitive(self, tracers, params)
File [...]/jax/interpreters/partial_eval.py, line 140, in process_primitive
return self.default_process_primitive(primitive, tracers, params)
File [...]/jax/interpreters/partial_eval.py, line 147, in default_process_primitive
return primitive.bind(*consts, **params)
File [...]/site-packages/jax/core.py, line 282, in bind
out = top_trace.process_primitive(self, tracers, params)
File [...]/jax/interpreters/partial_eval.py, line 1058, in process_primitive
out_avals = primitive.abstract_eval(*avals, **params)
File [...]/_src/lax/lax.py, line 1992, in standard_abstract_eval
shapes, dtypes = shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)
File [...]/_src/lax/lax.py, line 3090, in _dot_general_shape_rule
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
TypeError: dot_general requires contracting dimensions to have the same shape, got [512] and [50].
The stack trace above excludes JAX-internal frames.
The following is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "mikado_classes.py", line 2054, in <module>
app.run(main)
File "/root/.local/lib/python3.7/site-packages/absl/app.py", line 300, in run
_run_main(main, args)
File "/root/.local/lib/python3.7/site-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "mikado_classes.py", line 1153, in main
loop_neu.run(2)
File "/root/.local/lib/python3.7/site-packages/trax/supervised/training.py", line 361, in run
loss, optimizer_metrics = self._run_one_step(task_index, task_changed)
File "/root/.local/lib/python3.7/site-packages/trax/supervised/training.py", line 483, in _run_one_step
batch, rng, step=step, learning_rate=learning_rate
File "/root/.local/lib/python3.7/site-packages/trax/optimizers/trainer.py", line 134, in one_step
(weights, self._slots), step, self._opt_params, batch, state, rng)
File "/root/.local/lib/python3.7/site-packages/jax/_src/traceback_util.py", line 139, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/root/.local/lib/python3.7/site-packages/jax/api.py", line 398, in f_jitted
return cpp_jitted_f(context, *args, **kwargs)
File "/root/.local/lib/python3.7/site-packages/jax/api.py", line 295, in cache_miss
donated_invars=donated_invars)
File "/root/.local/lib/python3.7/site-packages/jax/core.py", line 1275, in bind
return call_bind(self, fun, *args, **params)
File "/root/.local/lib/python3.7/site-packages/jax/core.py", line 1266, in call_bind
outs = primitive.process(top_trace, fun, tracers, params)
File "/root/.local/lib/python3.7/site-packages/jax/core.py", line 1278, in process
return trace.process_call(self, fun, tracers, params)
File "/root/.local/lib/python3.7/site-packages/jax/core.py", line 631, in process_call
return primitive.impl(f, *tracers, **params)
File "/root/.local/lib/python3.7/site-packages/jax/interpreters/xla.py", line 581, in _xla_call_impl
*unsafe_map(arg_spec, args))
File "/root/.local/lib/python3.7/site-packages/jax/linear_util.py", line 260, in memoized_fun
ans = call(fun, *args)
File "/root/.local/lib/python3.7/site-packages/jax/interpreters/xla.py", line 656, in _xla_callable
jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(fun, abstract_args)
File "/root/.local/lib/python3.7/site-packages/jax/interpreters/partial_eval.py", line 1216, in trace_to_jaxpr_final
jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(fun, main, in_avals)
File "/root/.local/lib/python3.7/site-packages/jax/interpreters/partial_eval.py", line 1196, in trace_to_subjaxpr_dynamic
ans = fun.call_wrapped(*in_tracers)
File "/root/.local/lib/python3.7/site-packages/jax/linear_util.py", line 166, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/root/.local/lib/python3.7/site-packages/trax/optimizers/trainer.py", line 173, in single_device_update_fn
batch, weights, state, rng)
File "/root/.local/lib/python3.7/site-packages/jax/_src/traceback_util.py", line 139, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/root/.local/lib/python3.7/site-packages/jax/api.py", line 810, in value_and_grad_f
ans, vjp_py, aux = _vjp(f_partial, *dyn_args, has_aux=True)
File "/root/.local/lib/python3.7/site-packages/jax/api.py", line 1918, in _vjp
out_primal, out_vjp, aux = ad.vjp(flat_fun, primals_flat, has_aux=True)
File "/root/.local/lib/python3.7/site-packages/jax/interpreters/ad.py", line 116, in vjp
out_primals, pvals, jaxpr, consts, aux = linearize(traceable, *primals, has_aux=True)
File "/root/.local/lib/python3.7/site-packages/jax/interpreters/ad.py", line 101, in linearize
jaxpr, out_pvals, consts = pe.trace_to_jaxpr(jvpfun_flat, in_pvals)
File "/root/.local/lib/python3.7/site-packages/jax/interpreters/partial_eval.py", line 506, in trace_to_jaxpr
jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals)
File "/root/.local/lib/python3.7/site-packages/jax/linear_util.py", line 166, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/root/.local/lib/python3.7/site-packages/trax/layers/base.py", line 549, in pure_fn
self._caller, signature(x), trace) from None
trax.layers.base.LayerError: Exception passing through layer Serial (in pure_fn):
layer created in file [...]/trax/supervised/training.py, line 865
layer input shapes: (ShapeDtype{shape:(64, 50), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32})
File [...]/trax/layers/combinators.py, line 88, in forward
outputs, s = layer.pure_fn(inputs, w, s, rng, use_cache=True)
LayerError: Exception passing through layer Serial (in pure_fn):
layer created in file [...]/mikado_classes.py, line 1134
layer input shapes: (ShapeDtype{shape:(64, 50), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32}, ShapeDtype{shape:(64, 1), dtype:int32})
File [...]/trax/layers/combinators.py, line 88, in forward
outputs, s = layer.pure_fn(inputs, w, s, rng, use_cache=True)
LayerError: Exception passing through layer Dense_7 (in pure_fn):
layer created in file [...]/mikado_classes.py, line 1133
layer input shapes: ShapeDtype{shape:(64, 512), dtype:float32}
File [...]/trax/layers/assert_shape.py, line 122, in forward_wrapper
y = forward(self, x, *args, **kwargs)
File [...]/trax/layers/core.py, line 95, in forward
return jnp.dot(x, w) + b # Affine map.
File [...]/_src/numpy/lax_numpy.py, line 3498, in dot
return lax.dot_general(a, b, (contract_dims, batch_dims), precision)
File [...]/_src/lax/lax.py, line 674, in dot_general
preferred_element_type=preferred_element_type)
File [...]/site-packages/jax/core.py, line 282, in bind
out = top_trace.process_primitive(self, tracers, params)
File [...]/jax/interpreters/ad.py, line 285, in process_primitive
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
File [...]/jax/interpreters/ad.py, line 458, in standard_jvp
val_out = primitive.bind(*primals, **params)
File [...]/site-packages/jax/core.py, line 282, in bind
out = top_trace.process_primitive(self, tracers, params)
File [...]/jax/interpreters/partial_eval.py, line 140, in process_primitive
return self.default_process_primitive(primitive, tracers, params)
File [...]/jax/interpreters/partial_eval.py, line 147, in default_process_primitive
return primitive.bind(*consts, **params)
File [...]/site-packages/jax/core.py, line 282, in bind
out = top_trace.process_primitive(self, tracers, params)
File [...]/jax/interpreters/partial_eval.py, line 1058, in process_primitive
out_avals = primitive.abstract_eval(*avals, **params)
File [...]/_src/lax/lax.py, line 1992, in standard_abstract_eval
shapes, dtypes = shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)
File [...]/_src/lax/lax.py, line 3090, in _dot_general_shape_rule
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
TypeError: dot_general requires contracting dimensions to have the same shape, got [512] and [50].
The mistake was not in the architecture. The problem was that my inputs were not shaped correctly.
The targets should have been of shape (batch_size,), but I sent (batch_size, 1). So a target array should have been, e.g.:
[1, 5, 99, 2, 1, 3, 2, 8]
but I produced
[[1], [5], [99], [2], [1], [3], [2], [8]].
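In numpy terms (illustrative shapes), the fix amounts to dropping the trailing axis:

import numpy as np

bad_targets = np.array([[1], [5], [99], [2], [1], [3], [2], [8]])  # shape (8, 1): what I produced
targets = bad_targets.squeeze(axis=1)                              # shape (8,):  what trax expects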

Getting error while training YOLOv3: ValueError: tf.function-decorated function tried to create variables on non-first call

I am training a custom YOLOv3 model and getting the error "ValueError: tf.function-decorated function tried to create variables on non-first call." while fitting the model for training.
The error occurs on the fit_generator statement. Could somebody please help?
train_generator = BatchGenerator(
    instances = train_ints,
    anchors = config['model']['anchors'],
    labels = labels,
    downsample = 32, # ratio between network input's size and network output's size, 32 for YOLOv3
    max_box_per_image = max_box_per_image,
    batch_size = config['train']['batch_size'],
    min_net_size = config['model']['min_input_size'],
    max_net_size = config['model']['max_input_size'],
    shuffle = True,
    jitter = 0.3,
    norm = normalize
)
train_model, infer_model = create_model(
    nb_class = len(labels),
    anchors = config['model']['anchors'],
    max_box_per_image = max_box_per_image,
    max_grid = [config['model']['max_input_size'], config['model']['max_input_size']],
    batch_size = config['train']['batch_size'],
    warmup_batches = warmup_batches,
    ignore_thresh = config['train']['ignore_thresh'],
    multi_gpu = multi_gpu,
    saved_weights_name = config['train']['saved_weights_name'],
    lr = config['train']['learning_rate'],
    grid_scales = config['train']['grid_scales'],
    obj_scale = config['train']['obj_scale'],
    noobj_scale = config['train']['noobj_scale'],
    xywh_scale = config['train']['xywh_scale'],
    class_scale = config['train']['class_scale'],
)
###############################
# Kick off the training
###############################
callbacks = create_callbacks(config['train']['saved_weights_name'], config['train']['tensorboard_dir'], infer_model)
print ("before kickoff", len(train_generator))
print ("before kickoff", train_generator)
train_model.fit_generator(
    generator = train_generator,
    steps_per_epoch = len(train_generator) * config['train']['train_times'],
    epochs = config['train']['nb_epochs'] + config['train']['warmup_epochs'],
    #epochs = 1,
    verbose = 2 if config['train']['debug'] else 1,
    callbacks = callbacks,
    workers = 2,
    max_queue_size = 8
)
print ("after kickoff")
The error I am getting is:
WARNING:tensorflow:Model failed to serialize as JSON. Ignoring... Layer YoloLayer has arguments in __init__ and therefore must override get_config.
Epoch 1/21
Traceback (most recent call last):
File "train.py", line 300, in
main(args)
File "train.py", line 269, in main
train_model.fit_generator(
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py", line 1815, in fit_generator
return self.fit(
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py", line 108, in _method_wrapper
return method(self, *args, **kwargs)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py", line 1098, in fit
tmp_logs = train_function(iterator)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 780, in call
result = self._call(*args, **kwds)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 840, in _call
return self._stateless_fn(*args, **kwds)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 2828, in call
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3213, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3065, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 986, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 600, in wrapped_fn
return weak_wrapped_fn().wrapped(*args, **kwds)
File "/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 973, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:806 train_function *
return step_function(self, iterator)
/Users/karthikeyan/Desktop/table/yolo.py:46 call *
batch_seen = tf.Variable(0.)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py:262 __call__ **
return cls._variable_v2_call(*args, **kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py:244 _variable_v2_call
return previous_getter(
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py:67 getter
return captured_getter(captured_previous, **kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2857 creator
return next_creator(**kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py:67 getter
return captured_getter(captured_previous, **kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2857 creator
return next_creator(**kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py:67 getter
return captured_getter(captured_previous, **kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2857 creator
return next_creator(**kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py:67 getter
return captured_getter(captured_previous, **kwargs)
/Users/karthikeyan/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:701 invalid_creator_scope
raise ValueError(
ValueError: tf.function-decorated function tried to create variables on non-first call.
I was able to find an answer. Adding tf.config.experimental_run_functions_eagerly(True) after importing tensorflow resolved the issue.
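For reference, a minimal sketch of that workaround (newer TF releases expose the same switch as tf.config.run_functions_eagerly):

import tensorflow as tf

# Run tf.function-decorated code (including Keras' compiled train step) eagerly,
# so layers such as YoloLayer may create variables on every call. This trades
# graph performance for compatibility.
tf.config.experimental_run_functions_eagerly(True)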

tensorflow v1 GradientTape: AttributeError: 'NoneType' object has no attribute 'eval'

I want to compute the gradient of the distance between the NSynth WaveNet encoding of two sine waves.
This is TensorFlow v1.
I am working with code based upon https://github.com/magenta/magenta/blob/master/magenta/models/nsynth/wavenet/fastgen.py
A minimal example of my bug is in this colab notebook: https://colab.research.google.com/drive/1oTEU8QAaOs0K1A0KHrAdt7kA7MkadNDr?usp=sharing
Here is the code:
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 1.x
!pip3 install -q magenta
!wget -c http://download.magenta.tensorflow.org/models/nsynth/wavenet-ckpt.tar && tar xvf wavenet-ckpt.tar
checkpoint_path = './wavenet-ckpt/model.ckpt-200000'
import math
from magenta.models.nsynth.wavenet import fastgen
import tensorflow as tf
session_config = tf.ConfigProto(allow_soft_placement=True)
session_config.gpu_options.allow_growth = True
sess = tf.Session(config=session_config)
pi = 3.1415926535897
SR = 16000
sample_length = 64000
DURATION_SECONDS = sample_length / SR
def sine(hz):
    time = tf.linspace(0.0, DURATION_SECONDS, sample_length)
    return tf.constant(0.5) * tf.cos(2.0 * pi * time * hz)
net = fastgen.load_nsynth(batch_size=2, sample_length=sample_length)
saver = tf.train.Saver()
saver.restore(sess, checkpoint_path)
"""We have two sine waves at 440 and 660 Hz. We use the encoder to generate two (125, 16) encodings:"""
twosines = tf.stack([sine(440), sine(660)]).eval(session=sess)
print(sess.run(net["encoding"], feed_dict={net["X"]: twosines}).shape)
"""Compute the distance between the two sine waves"""
distencode = tf.reduce_mean(tf.abs(net["encoding"][0] - net["encoding"][1]))
print(sess.run(distencode, feed_dict={net["X"]: twosines}))
"""I don't know why the following code doesn't work, but if I did I could solve the real task....
"""
net["X"] = twosines
distencode.eval(session=sess)
"""Here is the code that I need to work. I want to compute the gradient of the distance between the NSynth encoding of two sine waves:"""
fp = tf.constant(660.0)
newsines = tf.stack([sine(440), sine(fp)])
with tf.GradientTape() as g:
    g.watch(fp)
    dd_dfp = g.gradient(distencode, fp)
print(dd_dfp.eval(session=sess))
The last block, which I want to evaluate, gets the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-12-b5b8cdd00b24> in <module>()
4 g.watch(fp)
5 dd_dfp = g.gradient(distencode, fp)
----> 6 print(dd_dfp.eval(session=sess))
AttributeError: 'NoneType' object has no attribute 'eval'
I believe I need to define the operations to be executed within this block. However, I am using a pretrained model that I am just computing the distance over, so I am not sure how to define execution in that block.
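As far as I understand it, g.gradient returns None when the target was not computed while the tape was recording. A minimal eager-mode toy example (run standalone, not wired to the NSynth graph, since eager cannot be enabled after the session above exists) shows this contract:

import tensorflow as tf
tf.enable_eager_execution()  # TF1: the tape only records eagerly executed ops

fp = tf.constant(660.0)
z = fp * 3.0                        # computed before any tape is active
with tf.GradientTape(persistent=True) as g:
    g.watch(fp)
    y = fp * fp                     # computed while the tape is recording
print(g.gradient(y, fp))            # tf.Tensor(1320.0, ...)
print(g.gradient(z, fp))            # None: the tape never saw how z was made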
The second-to-last block, which would help me fix the last block, gives the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-10-c3411dcbfa2c> in <module>()
3 with tf.GradientTape() as g:
4 g.watch(fp)
----> 5 dd_dfp = g.gradient(distencode, g)
6 print(dd_dfp.eval(session=sess))
/tensorflow-1.15.2/python3.6/tensorflow_core/python/eager/backprop.py in gradient(self, target, sources, output_gradients, unconnected_gradients)
997 flat_sources = [_handle_or_self(x) for x in flat_sources]
998 for t in flat_sources_raw:
--> 999 if not t.dtype.is_floating:
1000 logging.vlog(
1001 logging.WARN, "The dtype of the source tensor must be "
AttributeError: 'GradientTape' object has no attribute 'dtype'
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/tensorflow-1.15.2/python3.6/tensorflow_core/python/client/session.py in _do_call(self, fn, *args)
1364 try:
-> 1365 return fn(*args)
1366 except errors.OpError as e:
8 frames
InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[{{node Placeholder}}]]
[[Mean/_759]]
(1) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[{{node Placeholder}}]]
0 successful operations.
0 derived errors ignored.
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
/tensorflow-1.15.2/python3.6/tensorflow_core/python/client/session.py in _do_call(self, fn, *args)
1382 '\nsession_config.graph_options.rewrite_options.'
1383 'disable_meta_optimizer = True')
-> 1384 raise type(e)(node_def, op, message)
1385
1386 def _extend_graph(self):
InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[node Placeholder (defined at /tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py:1748) ]]
[[Mean/_759]]
(1) Invalid argument: You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [2,64000]
[[node Placeholder (defined at /tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py:1748) ]]
0 successful operations.
0 derived errors ignored.
Original stack trace for 'Placeholder':
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py", line 664, in launch_instance
app.start()
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelapp.py", line 499, in start
self.io_loop.start()
File "/usr/local/lib/python3.6/dist-packages/tornado/platform/asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.6/asyncio/base_events.py", line 438, in run_forever
self._run_once()
File "/usr/lib/python3.6/asyncio/base_events.py", line 1451, in _run_once
handle._run()
File "/usr/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/usr/local/lib/python3.6/dist-packages/tornado/ioloop.py", line 758, in _run_callback
ret = callback()
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 548, in <lambda>
self.io_loop.add_callback(lambda : self._handle_events(self.socket, 0))
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 462, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 492, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 444, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-5-5120c8282e75>", line 1, in <module>
net = fastgen.load_nsynth(batch_size=2, sample_length=sample_length)
File "/tensorflow-1.15.2/python3.6/magenta/models/nsynth/wavenet/fastgen.py", line 64, in load_nsynth
x = tf.placeholder(tf.float32, shape=[batch_size, sample_length])
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/array_ops.py", line 2619, in placeholder
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/gen_array_ops.py", line 6669, in placeholder
"Placeholder", dtype=dtype, shape=shape, name=name)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/op_def_library.py", line 794, in _apply_op_helper
op_def=op_def)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py", line 3357, in create_op
attrs, op_def, compute_device)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py", line 3426, in _create_op_internal
op_def=op_def)
File "/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
Thank you.

AttributeError: 'int' object has no attribute 'value'

I can't wrap my head around this problem. I am running on TensorFlow 2 and I really do not see why this error appears. Is there something I am missing?
This is the relevant part of the code where the error appears:
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TFLiteLSTMCell
...
lstm_cells = []
lstm_0 = TFLiteLSTMCell(num_units=256, forget_bias=0, name='rnn_0')
lstm_1 = TFLiteLSTMCell(num_units=256, forget_bias=0, name='rnn_1')
lstm_2 = TFLiteLSTMCell(num_units=128, forget_bias=0, name='rnn_2')
lstm_3 = TFLiteLSTMCell(num_units=128, forget_bias=0, name='rnn_3')
lstm_cells.append(lstm_0)
lstm_cells.append(lstm_1)
lstm_cells.append(lstm_2)
lstm_cells.append(lstm_3)
bi_LSTM_2 = layers.Lambda(buildLstmLayer, arguments={'layers' : lstm_cells})(fc_1)
...
This is the corresponding Lambda layer. I am creating the bidirectional RNNs, and I think the error is more about the TFLiteLSTMCell itself, although I believe I am using it correctly.
def buildLstmLayer(inputs, layers):
    inputs = tf.transpose(inputs, [1, 0, 2])
    # inputs = tf.unstack(inputs, axis=1)
    inter_output, _ = bidirectional_dynamic_rnn(
        layers[0],
        layers[1],
        inputs,
        dtype='float32',
        time_major=True)
    inter_output = tf.concat(inter_output, 2)
    output, _ = bidirectional_dynamic_rnn(
        layers[2],
        layers[3],
        inter_output,
        dtype='float32',
        time_major=True)
    output = tf.concat(output, 2)
    # output = tf.stack(output, axis=1)
    output = tf.transpose(output, [1, 0, 2])
    return output
This is the traceback I am getting:
Traceback (most recent call last):
File "crnn_architecture.py", line 279, in <module>
model, base_model = CRNN_model(is_training=True)
File "crnn_architecture.py", line 108, in CRNN_model
bi_LSTM_2 = layers.Lambda(buildLstmLayer, arguments={'layers' : lstm_cells})(fc_1)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 847, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/layers/core.py", line 795, in call
return self.function(inputs, **arguments)
File "crnn_architecture.py", line 146, in buildLstmLayer
time_major=True)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/lite/experimental/examples/lstm/rnn.py", line 379, in bidirectional_dynamic_rnn
scope=fw_scope)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/lite/experimental/examples/lstm/rnn.py", line 266, in dynamic_rnn
dtype=dtype)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/rnn.py", line 916, in _dynamic_rnn_loop
swap_memory=swap_memory)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/control_flow_ops.py", line 2675, in while_loop
back_prop=back_prop)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/while_v2.py", line 198, in while_loop
add_control_dependencies=add_control_dependencies)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/func_graph.py", line 915, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/while_v2.py", line 176, in wrapped_body
outputs = body(*_pack_sequence_as(orig_loop_vars, args))
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/rnn.py", line 884, in _time_step
(output, new_state) = call_cell()
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/rnn.py", line 870, in <lambda>
call_cell = lambda: cell(input_t, state)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 847, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/layers/recurrent.py", line 137, in call
inputs, states = cell.call(inputs, states, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/lite/experimental/examples/lstm/rnn_cell.py", line 440, in call
if input_size.value is None:
AttributeError: 'int' object has no attribute 'value'
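The last frame accesses input_size.value, which is the TF1 Dimension API; under TF2 behavior, TensorShape entries are plain Python ints. A minimal illustration of the difference:

import tensorflow as tf

shape = tf.TensorShape([None, 128])
dim = shape[-1]
print(dim)  # TF 2.x: 128 (a plain int), so dim.value raises AttributeError
            # TF 1.x: Dimension(128), where dim.value == 128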