Tape gradient gives wrong output - tensorflow

I am trying to compute gradients using tape.gradient(), but it gives me the wrong answer. The problem is in the line u_z = tape.gradient(u, z, unconnected_gradients=tf.UnconnectedGradients.ZERO) and the two lines that follow it in the code below. The function u is not constant in the variables z, f, and t, yet tape.gradient(u, z) and tape.gradient(u, t) return a None object. If I pass unconnected_gradients=tf.UnconnectedGradients.ZERO as an argument, I instead get 0.0 as the derivative, which does not make sense. So one thing that might have gone wrong is that the graph gets disconnected somewhere, but I cannot understand why this happens or how to fix it. I am using tensorflow 2.6.0 and keras 2.6.0. I provide the code and error message below.
import tensorflow as tf
import numpy as np
from tensorflow import keras
import os
from tqdm import trange
import matplotlib.pyplot as plt
# Switch off unnecessary TF warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Model():
    def __init__(self):
        self.optimizer = keras.optimizers.Adam()
        self.initializer = tf.keras.initializers.HeNormal()
        self.batchSize = 500
        self.number_epochs=5000

    def NN(self,num_layers = 3, num_neurons = 30):
        model_ = keras.models.Sequential()
        model_.add(keras.layers.Dense(num_neurons,activation='tanh',input_dim=3,kernel_initializer = self.initializer))
        for layer in range(num_layers-1):
            model_.add(keras.layers.Dense(num_neurons,activation='tanh',kernel_initializer=self.initializer))
        model_.add(keras.layers.Dense(1,kernel_initializer=self.initializer))
        return model_

    def solve_pde(self,value_function,X,idx):
        z,f,t = X[:,0:1],X[:,1:2],X[:,2:3]
        with tf.GradientTape(persistent=True) as tape:
            u = value_function(tf.concat([z,f,t],axis=1))
        u_z = tape.gradient(u,z,unconnected_gradients=tf.UnconnectedGradients.ZERO)
        u_zz = tape.gradient(u_z,z,unconnected_gradients=tf.UnconnectedGradients.ZERO)
        u_t = tape.gradient(u,t)
        u_pde = u_t + u_z + u_zz - tf.cast(0.5,dtype=tf.float32) * u
        return u_pde

    def loss_function(self,batchSize):
        z = tf.linspace(0.001,0.999, 200)
        f = tf.linspace(0.1,0.2, 20)
        z_tile = tf.tile(tf.expand_dims(z,axis=-1),multiples=[20,1])
        f_tile = tf.reshape(tf.repeat(f,200),[-1,1])
        dt = 0.9
        X = tf.concat((z_tile,f_tile,tf.reshape(tf.repeat(dt,z_tile.shape[0]),[-1,1])),axis=1)
        X_pde = tf.concat((z_tile,f_tile,tf.random.uniform(shape=(z_tile.shape[0],1),minval=0,maxval=dt)),axis=1)
        x_star = tf.concat((z_tile,f_tile,tf.reshape(tf.repeat(0.0,z_tile.shape[0]),[-1,1])),axis=1)
        idx = np.random.choice(X.shape[0],batchSize,replace=True)
        loss_e = self.solve_pde(self.value_function_e,X_pde,idx)
        self.value_updated = self.value_function_e(tf.concat[x_star[:,0:1],x_star[:,1:2],x_star[:,2:3]]).numpy().reshape(self.innerStep.Nz,self.innerStep.Nf).transpose()
        return loss_e

    #tf.function
    def training_step(self):
        with tf.GradientTape(persistent=True) as tape:
            loss_e = self.loss_function(self.batchSize)
        grads_valueE = tape.gradient(loss_e,self.theta_valueFunction_e)
        self.optimizer.apply_gradients(zip(grads_valueE,self.theta_valueFunction_e))
        return loss_e

    def train_model(self):
        self.value_function_e = self.NN()
        self.theta_valueFunction_e = self.value_function_e.trainable_variables
        self.LVF= []
        for epoch in trange(self.number_epochs):
            print(epoch)
            loss_e = self.training_step()
            self.LVF_list.append(loss_e.numpy())
if __name__=="__main__":
    ext = Model()
    ext.train_model()
The error message along with the full traceback is:
Traceback (most recent call last):
File "<ipython-input-26-f5a127c3c9ae>", line 1, in <module>
runfile('C:/Users/user/Google Drive/S/Research Project4/trial.py', wdir='C:/Users/user/Google Drive/SFI/Research Project4')
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/user/Google Drive/SFI/Research Project4/trial.py", line 85, in <module>
ext.train_model()
File "C:/Users/user/Google Drive/SFI/Research Project4/trial.py", line 79, in train_model
loss_e = self.training_step()
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py", line 862, in __call__
return self._python_function(*args, **kwds)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\eager\function.py", line 3985, in bound_method_wrapper
return wrapped_fn(weak_instance(), *args, **kwargs)
File "C:/Users/user/Google Drive/SFI/Research Project4/trial.py", line 65, in training_step
loss_e = self.loss_function(self.batchSize)
File "C:/Users/user/Google Drive/SFI/Research Project4/trial.py", line 58, in loss_function
loss_e = self.solve_pde(self.value_function_e,X_pde,idx)
File "C:/Users/user/Google Drive/SFI/Research Project4/trial.py", line 34, in solve_pde
u_pde = u_t + u_z + u_zz - tf.cast(0.5,dtype=tf.float32) * u
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py", line 1399, in r_binary_op_wrapper
y, x = maybe_promote_tensors(y, x)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py", line 1335, in maybe_promote_tensors
ops.convert_to_tensor(tensor, dtype, name="x"))
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\profiler\trace.py", line 163, in wrapped
return func(*args, **kwargs)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1566, in convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py", line 346, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py", line 272, in constant
allow_broadcast=True)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py", line 283, in _constant_impl
return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py", line 308, in _constant_eager_impl
t = convert_to_eager_tensor(value, ctx, dtype)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py", line 106, in convert_to_eager_tensor
return ops.EagerTensor(value, ctx.device_name, dtype)
ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
Any help is much appreciated. Thank you.

There are two problems in your code which prevent you from getting the result you want.
If you want to compute higher-order derivatives, you have to create nested GradientTape objects.
GradientTape automatically tracks variables in its context; if you want to track tensors (as in your case, where you want gradients with respect to z and t), you have to call tape.watch(<my_tensor>), otherwise you will not have gradients for them.
Fixed code:
def solve_pde(self, value_function, X, idx):
    z, f, t = X[:, 0:1], X[:, 1:2], X[:, 2:3]
    with tf.GradientTape(persistent=True) as tape:
        tape.watch(z)
        with tf.GradientTape(persistent=True) as tape2:
            tape2.watch(z)
            tape2.watch(t)
            u = value_function(tf.concat([z, f, t], axis=1))
        u_z = tape2.gradient(u, z)
    u_zz = tape.gradient(u_z, z)
    u_t = tape2.gradient(u, t)
    u_pde = u_t + u_z + u_zz - tf.cast(0.5, dtype=tf.float32) * u
    return u_pde
More on gradient tape can be found in the official documentation: https://www.tensorflow.org/api_docs/python/tf/GradientTape
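If it helps, here is a minimal, self-contained sketch (a toy cubic function, not the PDE network above) showing the same nested-tape plus watch pattern for first and second derivatives:
import tensorflow as tf

x = tf.constant([[2.0]])
with tf.GradientTape() as outer:
    outer.watch(x)                    # x is a plain tensor, so it must be watched
    with tf.GradientTape() as inner:
        inner.watch(x)
        y = x ** 3                    # y = x^3
    dy_dx = inner.gradient(y, x)      # 3 * x^2 -> 12.0 at x = 2
d2y_dx2 = outer.gradient(dy_dx, x)    # 6 * x   -> 12.0 at x = 2

print(dy_dx.numpy(), d2y_dx2.numpy())
Without the watch calls the inner gradient already comes back as None, which is exactly the symptom in the question.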

Related

TypeError: Can not convert a NoneType into a Tensor or Operation -- Error believe related to converting to graph

Below is my model:
class CustomModel(tf.keras.Model):
    def __init__(self, model1, model2, model3, model4):
        super(deep_and_wide, self).__init__()
        self.model1 = model1
        self.model2 = model2
        self.model3 = model3
        self.model4 = model4

    def call(self, inputs):
        x1 = self.mode1([inputs["a"], inputs["b"]])
        x2 = self.model2([inputs["a"], inputs["b"]])
        x3 = self.model3([inputs["a"], inputs["b"]])
        x4 = self.model4([inputs["a"], inputs["b"]])
        x = Concatenate()([x1, x2, x3])
        x = TimeDistributed(Dense(2))(x)
        x = Add()([x, x4])
        x_fc = Dense(1)(x)
        x_ec = Dense(1)(x)
        return x_fc, x_ec

    def train_step(self, data):
        with tf.GradientTape() as tape:
            data = data_adapter.expand_1d(data)
            batch_inputs, batch_outputs, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
            y_true_fc, y_true_ec = batch_outputs["y_fc"], batch_outputs["y_ec"]
            y_pred_fc, y_pred_ec = self(batch_inputs, training=True)
            loss_fc = self.compiled_loss(y_true_fc, y_pred_fc)
            loss_ec = self.compiled_loss(y_true_ec, y_pred_ec)
            print("here")
        trainable_variables = self.trainable_variables
        print("here")
        gradients = tape.gradient([loss_fc, loss_ec], trainable_variables)
        print("here")
        self.optimizer.apply_gradients(zip(gradients, trainable_variables))
        print("here")
And below is my custom loss
class CustomLoss(tf.keras.losses.Loss):
    def __init__(self, mask=True, alpha=1, beta=1, gamma=1, dtype=tf.float64):
        super(CustomLoss, self).__init__(reduction=tf.keras.losses.Reduction.NONE)
        self.mask = mask
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.dtype = dtype

    def call(self, y_true, y_pred):
        def loss_fn(y_true, y_pred, mask):
            y_true = tf.boolean_mask(y_true, mask)
            y_pred = tf.boolean_mask(y_pred, mask)
            return tf.keras.losses.MSE(y_true, y_pred)

        self.mask = tf.not_equal(y_true, 0.)
        y_true = tf.cast(y_true, self.dtype)
        y_pred = tf.cast(y_pred, self.dtype)
        y_pred = tf.multiply(y_pred, tf.cast(self.mask, dtype=self.dtype))
        y_pred_cum = tf.math.cumsum(y_pred, axis=1)
        y_pred_cum = tf.multiply(y_pred_cum, tf.cast(self.mask, dtype=self.dtype))
        y_true_cum = tf.math.cumsum(y_true, axis=1)
        y_true_cum = tf.multiply(y_true_cum, tf.cast(self.mask, dtype=self.dtype))
        loss_value = self.alpha * loss_fn(y_true, y_pred, self.mask) + \
                     self.gamma * loss_fn(y_true_cum, y_pred_cum, self.mask)
        return loss_value
And then finally:
optimizer = tf.keras.optimizers.Adam()
loss = CustomLoss()
model.compile(optimizer, loss)
model.fit(train_data, epochs=5, validation_data=val_data)
My data inputs are of size (sequence length, feature length), where sequence length is variable, hence I am using tf.data.experimental.bucket_by_sequence_length to pad each batch to its own max sequence length (as opposed to padding everything to one global max sequence length). All in all, my train and val data are tf.data.Datasets, each created using tf.data.experimental.bucket_by_sequence_length, where each batch is of size (None, None, feature length).
When I run the above code, I get the following errors and cannot seem to understand where I am going wrong:
Traceback (most recent call last):
File "<input>", line 75, in <module>
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1100, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in user code:
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\keras\engine\training.py:805 train_function *
return step_function(self, iterator)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\keras\engine\training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\keras\engine\training.py:790 run_step **
with ops.control_dependencies(_minimum_control_deps(outputs)):
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\framework\ops.py:5359 control_dependencies
return get_default_graph().control_dependencies(control_inputs)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\framework\func_graph.py:362 control_dependencies
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\framework\ops.py:4815 control_dependencies
c = self.as_graph_element(c)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\framework\ops.py:3726 as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
C:\Users\\Anaconda3\envs\tf_recsys\lib\site-packages\tensorflow\python\framework\ops.py:3814 _as_graph_element_locked
raise TypeError("Can not convert a %s into a %s." %
TypeError: Can not convert a NoneType into a Tensor or Operation.
The four print statements inserted in the train_step function above are printed.
This NoneType refers to the value returned by your custom train_step. When using a custom train_step you should return something that can be converted into a tensor so that the minimum control dependencies can process it: typically the loss value, as {"loss": loss_value}, possibly together with some other metrics, or at least an empty dict {}.
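For illustration, here is a minimal, self-contained sketch (a toy one-layer model, not the architecture above) of a train_step that returns such a dict:
import tensorflow as tf

class TinyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(1)

    def call(self, inputs):
        return self.dense(inputs)

    def train_step(self, data):
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(y, y_pred)
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        # Returning a dict of tensors (rather than None) gives the Keras
        # train function something it can convert for its control dependencies.
        return {"loss": loss}

model = TinyModel()
model.compile(optimizer="adam", loss="mse")
model.fit(tf.random.normal([32, 4]), tf.random.normal([32, 1]), epochs=1, verbose=0)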

Tensorflow Estimator API: Remember LSTM state from previous batch for next batch with dynamic batch_size

I know that a similar question has already been asked several times here on Stack Overflow and across the internet, but I am just not able to find a solution to the following problem: I am trying to build a stateful LSTM model in TensorFlow with its Estimator API.
I tried the solution from Tensorflow, best way to save state in RNNs?, which works as long as I am using a static batch_size. Having a dynamic batch_size causes the following problem:
ValueError: initial_value must have a shape specified:
Tensor("DropoutWrapperZeroState/MultiRNNCellZeroState/DropoutWrapperZeroState/LSTMCellZeroState/zeros:0",
shape=(?, 200), dtype=float32)
Setting tf.Variable(...., validate_shape=False) just moves the problem further down the Graph:
Traceback (most recent call last):
File "model.py", line 576, in <module>
tf.app.run(main=run_experiment)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "model.py", line 137, in run_experiment
hparams=params # HParams
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 210, in run
return _execute_schedule(experiment, schedule)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 47, in _execute_schedule
return task()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 495, in train_and_evaluate
self.train(delay_secs=0)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 275, in train
hooks=self._train_monitors + extra_hooks)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 660, in _call_train
hooks=hooks)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 241, in train
loss = self._train_model(input_fn=input_fn, hooks=hooks)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 560, in _train_model
model_fn_lib.ModeKeys.TRAIN)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 545, in _call_model_fn
features=features, labels=labels, **kwargs)
File "model.py", line 218, in model_fn
output, state = get_model(features, params)
File "model.py", line 567, in get_model
model = lstm(inputs, params)
File "model.py", line 377, in lstm
output, new_states = tf.nn.dynamic_rnn(multicell, inputs=inputs, initial_state = states)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 574, in dynamic_rnn
dtype=dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 737, in _dynamic_rnn_loop
swap_memory=swap_memory)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2770, in while_loop
result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2599, in BuildLoop
pred, body, original_loop_vars, loop_vars, shape_invariants)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 2549, in _BuildLoop
body_result = body(*packed_vars_for_body)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 722, in _time_step
(output, new_state) = call_cell()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 708, in <lambda>
call_cell = lambda: cell(input_t, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 752, in __call__
output, new_state = self._cell(inputs, state, scope)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 180, in __call__
return super(RNNCell, self).__call__(inputs, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/layers/base.py", line 441, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 916, in call
cur_inp, new_state = cell(cur_inp, cur_state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 752, in __call__
output, new_state = self._cell(inputs, state, scope)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 180, in __call__
return super(RNNCell, self).__call__(inputs, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/layers/base.py", line 441, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 542, in call
lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1002, in _linear
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
ValueError: linear is expecting 2D arguments: [TensorShape([Dimension(None), Dimension(62)]), TensorShape(None)]
According to github issue 2838 it is NOT recommended to use non-trainable variables anyway(???), which is why I continued looking for other solutions.
Now I use placeholders and something like that (also suggested in the github thread) in my model_fn:
def rnn_placeholders(state):
"""Convert RNN state tensors to placeholders with the zero state as default."""
if isinstance(state, tf.contrib.rnn.LSTMStateTuple):
c, h = state
c = tf.placeholder_with_default(c, c.shape, c.op.name)
h = tf.placeholder_with_default(h, h.shape, h.op.name)
return tf.contrib.rnn.LSTMStateTuple(c, h)
elif isinstance(state, tf.Tensor):
h = state
h = tf.placeholder_with_default(h, h.shape, h.op.name)
return h
else:
structure = [rnn_placeholders(x) for x in state]
return tuple(structure)
state = rnn_placeholders(cell.zero_state(batch_size, tf.float32))
for tensor in flatten(state):
tf.add_to_collection('rnn_state_input', tensor)
x, new_state = tf.nn.dynamic_rnn(...)
for tensor in flatten(new_state):
tf.add_to_collection('rnn_state_output', tensor)
But unfortunately I do not know how to feed the values of new_state back into the placeholder state on every iteration when using the tf.Estimator API. Since I am quite new to TensorFlow, I think I lack some conceptual knowledge here. Might it be possible to use a custom SessionRunHook?
class UpdateHook(tf.train.SessionRunHook):
    def before_run(self, run_context):
        run_args = super(UpdateHook, self).before_run(run_context)
        run_args = tf.train.SessionRunArgs(new_state)
        #print(run_args)
        return run_args

    def after_run(self, run_context, run_values):
        #run_values gives the actual value of new_state.
        # How to update now the state placeholder??
Is there anyone who has an idea how to solve that problem? Tips and tricks are highly appreciated!!!
Thanks a lot!
PS: If something is unclear let me know ;)
EDIT: Unfortunately I am using the new tf.data API and cannot use StateSavingRNNEstimator as Eugene suggested.
This answer might be late, but I had a similar problem some months ago.
I solved it using a customised SessionRunHook. It might not be perfect in terms of performance, but you can give it a try.
class LSTMStateHook(tf.train.SessionRunHook):
    def __init__(self, params):
        self.init_states = None
        self.current_state = np.zeros((params.rnn_layers, 2, params.batch_size, params.state_size))

    def before_run(self, run_context):
        run_args = tf.train.SessionRunArgs([tf.get_default_graph().get_tensor_by_name('LSTM/output_states:0')], {self.init_states: self.current_state,},)
        return run_args

    def after_run(self, run_context, run_values):
        self.current_state = run_values[0][0]  # depends on your session run arguments!

    def begin(self):
        self.init_states = tf.get_default_graph().get_tensor_by_name('LSTM/init_states:0')
In your code where you define your lstm graph you need something like this:
if self.stateful is True:
    init_states = multicell.zero_state(self.batch_size, tf.float32)
    init_states = tf.identity(init_states, "init_states")
    l = tf.unstack(init_states, axis=0)
    rnn_tuple_state = tuple([tf.nn.rnn_cell.LSTMStateTuple(l[idx][0], l[idx][1]) for idx in range(self.rnn_layers)])
else:
    rnn_tuple_state = multicell.zero_state(self.batch_size, tf.float32)

# Unroll RNN
output, output_states = tf.nn.dynamic_rnn(multicell, inputs=inputs, initial_state=rnn_tuple_state)

if self.stateful is True:
    output_states = tf.identity(output_states, "output_states")

return output
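The hook then has to be attached to the training call. A hedged sketch of the wiring, where estimator and train_input_fn stand in for your own tf.estimator.Estimator and input function (with the older Experiment API the hook would go into train_monitors instead):
# Hypothetical wiring: the hook's before_run/after_run then fire on every
# session.run of the training op and carry the LSTM state across batches.
state_hook = LSTMStateHook(params)
estimator.train(input_fn=train_input_fn, hooks=[state_hook])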
There is an estimator you can base your code on that uses batch_sequences_with_states. It is called StateSavingRNNEstimator. Unless you are using the new tf.contrib.data / tf.data API, it should be enough to get you started.

Tensorflow: value error with variable_scope in LSTM

This is my code in TensorFlow to train a GAN. I am training the discriminator to be able to distinguish between fake and original video. I have omitted the parts of the code that are not relevant, to avoid Stack Overflow's "mostly code" error.
X = tf.placeholder(tf.float32, shape=[None, 28, 28])
D_W1 = tf.Variable(xavier_init([1024, 128]))
D_b1 = tf.Variable(tf.zeros(shape=[128]))
D_W2 = tf.Variable(xavier_init([128, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
rnn_size = 1024
rnn_layer = 2
Z = tf.placeholder(tf.float32, shape=[None, 100])
G_W1 = tf.Variable(xavier_init([100, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))
G_W2 = tf.Variable(xavier_init([128, 784]))
G_b2 = tf.Variable(tf.zeros(shape=[784]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_Z(m, n):
    return np.random.uniform(-1., 1., size=[m, n])

def generator(z):
    G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
    G_prob = tf.nn.sigmoid(G_log_prob)
    G_prob = tf.reshape(G_prob, [-1, 28, 28])
    return G_prob

def discriminator(x):
    x = [tf.squeeze(t, [1]) for t in tf.split(x, 28, 1)]
    # with tf.variable_scope('cell_def'):
    stacked_rnn1 = []
    for iiLyr1 in range(rnn_layer):
        stacked_rnn1.append(tf.nn.rnn_cell.BasicLSTMCell(num_units=rnn_size, state_is_tuple=True))
    lstm_multi_fw_cell = tf.contrib.rnn.MultiRNNCell(cells=stacked_rnn1)
    # with tf.variable_scope('rnn_def'):
    dec_outputs, dec_state = tf.contrib.rnn.static_rnn(
        lstm_multi_fw_cell, x, dtype=tf.float32)
    D_h1 = tf.nn.relu(tf.matmul(dec_outputs[-1], D_W1) + D_b1)
    D_logit = tf.matmul(D_h1, D_W2) + D_b2
    D_prob = tf.nn.sigmoid(D_logit)
    return D_prob, D_logit
G_sample = generator(Z)
print(G_sample.get_shape())
print(X.get_shape())
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)
D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
G_loss = -tf.reduce_mean(tf.log(D_fake))
summary_d = tf.summary.histogram('D_loss histogram', D_loss)
summary_g = tf.summary.histogram('D_loss histogram', G_loss)
summary_s = tf.summary.scalar('D_loss scalar', D_loss)
summary_s1 = tf.summary.scalar('scalar scalar', G_loss)
# Add image summary
summary_op = tf.summary.image("plot", image)
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
mb_size = 128
Z_dim = 100
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
# merged_summary_op = tf.summary.merge_all()
sess = tf.Session()
saver = tf.train.Saver()
writer1 = tf.summary.FileWriter('log/log-sample1', sess.graph)
writer2 = tf.summary.FileWriter('log/log-sample2', sess.graph)
sess.run(tf.global_variables_initializer())
if not os.path.exists('out/'):
    os.makedirs('out/')
i = 0
with tf.variable_scope("myrnn") as scope:
    for it in range(5000):
        X_mb, _ = mnist.train.next_batch(mb_size)
        X_mb = tf.reshape(X_mb, [mb_size, -1, 28])
        _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
        _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})
        summary_str, eded = sess.run([summary_d, summary_s], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
        writer1.add_summary(summary_str, it)
        writer1.add_summary(eded, it)
        summary_str1, eded1 = sess.run([summary_g, summary_s1], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
        writer2.add_summary(summary_str1, it)
        writer2.add_summary(eded1, it)
        if it % 1000 == 0:
            print('Iter: {}'.format(it))
            print('D loss: {:.4}'.format(D_loss_curr))
            print('G_loss: {:.4}'.format(G_loss_curr))
            print()

save_path = saver.save(sess, "tmp/model.ckpt")
writer1.close()
writer2.close()
Following is the error when I run this code. Please help.
Traceback (most recent call last):
File "/Users/tulsijain/Desktop/Deep Learning Practise/GAN/vanila.py", line 104, in <module>
D_fake, D_logit_fake = discriminator(G_sample)
File "/Users/tulsijain/Desktop/Deep Learning Practise/GAN/vanila.py", line 64, in discriminator
lstm_multi_fw_cell, x, dtype=tf.float32)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn.py", line 1212, in static_rnn
(output, state) = call_cell()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn.py", line 1199, in <lambda>
call_cell = lambda: cell(input_, state)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 180, in __call__
return super(RNNCell, self).__call__(inputs, state)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/layers/base.py", line 441, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 916, in call
cur_inp, new_state = cell(cur_inp, cur_state)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 180, in __call__
return super(RNNCell, self).__call__(inputs, state)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/layers/base.py", line 441, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 383, in call
concat = _linear([inputs, h], 4 * self._num_units, True)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1017, in _linear
initializer=kernel_initializer)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/variable_scope.py", line 1065, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/variable_scope.py", line 962, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/variable_scope.py", line 360, in get_variable
validate_shape=validate_shape, use_resource=use_resource)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/variable_scope.py", line 1405, in wrapped_custom_getter
*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 183, in _rnn_get_variable
variable = getter(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 183, in _rnn_get_variable
variable = getter(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/variable_scope.py", line 352, in _true_getter
use_resource=use_resource)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/tensorflow/python/ops/variable_scope.py", line 664, in _get_single_variable
name, "".join(traceback.format_list(tb))))
ValueError: Variable rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:
File "/Users/tulsijain/Desktop/Deep Learning Practise/GAN/vanila.py", line 64, in discriminator
lstm_multi_fw_cell, x, dtype=tf.float32)
File "/Users/tulsijain/Desktop/Deep Learning Practise/GAN/vanila.py", line 103, in <module>
D_real, D_logit_real = discriminator(X)
It is a GAN. I am using MNIST data to train the generator and discriminator.
Add a reuse parameter to the BasicLSTMCell. Since you are calling the discriminator function twice with reuse=None both times, it throws an error when it tries to create variables with the same name. In this context you need to reuse the variables from the graph for the second call, as you don't need to create a new set of variables.
def discriminator(x, reuse):
    x = [tf.squeeze(t, [1]) for t in tf.split(x, 28, 1)]
    # with tf.variable_scope('cell_def'):
    stacked_rnn1 = []
    for iiLyr1 in range(rnn_layer):
        stacked_rnn1.append(tf.nn.rnn_cell.BasicLSTMCell(num_units=rnn_size, state_is_tuple=True, reuse=reuse))
    lstm_multi_fw_cell = tf.contrib.rnn.MultiRNNCell(cells=stacked_rnn1)
    # with tf.variable_scope('rnn_def'):
    dec_outputs, dec_state = tf.contrib.rnn.static_rnn(
        lstm_multi_fw_cell, x, dtype=tf.float32)
    D_h1 = tf.nn.relu(tf.matmul(dec_outputs[-1], D_W1) + D_b1)
    D_logit = tf.matmul(D_h1, D_W2) + D_b2
    D_prob = tf.nn.sigmoid(D_logit)
    return D_prob, D_logit
....
D_real, D_logit_real = discriminator(X, None)
D_fake, D_logit_fake = discriminator(G_sample, True)
....
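An alternative sketch (untested against the full script above) is to build the discriminator inside a named variable scope and let the scope handle reuse, so the flag does not have to be threaded into every cell:
def discriminator(x, reuse=None):
    # All variables created via get_variable (including the LSTM kernels)
    # land under "discriminator/..."; reuse=True makes the second call
    # pick up the existing ones instead of creating duplicates.
    with tf.variable_scope("discriminator", reuse=reuse):
        x = [tf.squeeze(t, [1]) for t in tf.split(x, 28, 1)]
        cells = [tf.nn.rnn_cell.BasicLSTMCell(num_units=rnn_size, state_is_tuple=True)
                 for _ in range(rnn_layer)]
        lstm_multi_fw_cell = tf.contrib.rnn.MultiRNNCell(cells=cells)
        dec_outputs, dec_state = tf.contrib.rnn.static_rnn(
            lstm_multi_fw_cell, x, dtype=tf.float32)
        D_h1 = tf.nn.relu(tf.matmul(dec_outputs[-1], D_W1) + D_b1)
        D_logit = tf.matmul(D_h1, D_W2) + D_b2
        D_prob = tf.nn.sigmoid(D_logit)
    return D_prob, D_logit

D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample, reuse=True)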

The code that works for LinearRegressor returns AttributeError: 'Tensor' object has no attribute 'get' for DynamicRnnEstimator

To begin with, I should say that I am using TF v1.1.
The code:
import random
import tensorflow as tf
xData = []
yData = []
for _ in range(10000):
    x = random.random()
    xData.append(x)
    y = 2 * x
    yData.append(y)

xc = tf.contrib.layers.real_valued_column("")
estimator = tf.contrib.learn.DynamicRnnEstimator(problem_type = constants.ProblemType.LINEAR_REGRESSION,
                                                 prediction_type = PredictionType.SINGLE_VALUE,
                                                 sequence_feature_columns = [xc],
                                                 context_feature_columns = None,
                                                 num_units = 5,
                                                 cell_type = 'lstm',
                                                 optimizer = 'SGD',
                                                 learning_rate = '0.1')

def get_train_inputs():
    x = tf.constant(xData)
    y = tf.constant(yData)
    return x, y
estimator.fit(input_fn=get_train_inputs, steps=TRAINING_STEPS)
I got:
AttributeError: 'Tensor' object has no attribute 'get'
here.
The same code works for LinearRegressor instead of DynamicRnnEstimator.
WARNING:tensorflow:From E:\Python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\dynamic_rnn_estimator.py:724: regression_target (from tensorflow.contrib.layers.python.layers.target_column) is deprecated and will be removed after 2016-11-12. Instructions for updating: This file will be removed after the deprecation date. Please switch to third_party/tensorflow/contrib/learn/python/learn/estimators/head.py
WARNING:tensorflow:Using temporary folder as model directory: C:\Users\pavel\AppData\Local\Temp\tmpzy68t_iw
Traceback (most recent call last):
  File "C:/Users/pavel/PycharmProjects/rnnEstimator/main.py", line 31, in <module>
    estimator.fit(input_fn=get_train_inputs, steps=1000)
  File "E:\Python35\lib\site-packages\tensorflow\python\util\deprecation.py", line 281, in new_func
    return func(*args, **kwargs)
  File "E:\Python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py", line 430, in fit
    loss = self._train_model(input_fn=input_fn, hooks=hooks)
  File "E:\Python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py", line 927, in _train_model
    model_fn_ops = self._get_train_ops(features, labels)
  File "E:\Python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py", line 1132, in _get_train_ops
    return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
  File "E:\Python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py", line 1103, in _call_model_fn
    model_fn_results = self._model_fn(features, labels, **kwargs)
  File "E:\Python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\dynamic_rnn_estimator.py", line 516, in _dynamic_rnn_model_fn
    sequence_length = features.get(sequence_length_key)
AttributeError: 'Tensor' object has no attribute 'get'
Update:
Issue in the TF repo:
BATCH_SIZE = 32
SEQUENCE_LENGTH = 16
xc = tf.contrib.layers.real_valued_column("")
estimator = tf.contrib.learn.DynamicRnnEstimator(problem_type = constants.ProblemType.LINEAR_REGRESSION,
                                                 prediction_type = PredictionType.SINGLE_VALUE,
                                                 sequence_feature_columns = [xc],
                                                 context_feature_columns = None,
                                                 num_units = 5,
                                                 cell_type = 'lstm',
                                                 optimizer = 'SGD',
                                                 learning_rate = 0.1)

def get_train_inputs():
    x = tf.random_uniform([BATCH_SIZE, SEQUENCE_LENGTH])
    y = tf.reduce_mean(x, axis=1)
    x = tf.expand_dims(x, axis=2)
    return {"": x}, y
estimator.fit(input_fn=get_train_inputs, steps=1000)

Strange error when taking gradient of TensorArray

I'm using the code pasted below. The 'forward' part of the code seems to work, by virtue of the "assert root_emb == 1 + emb[0] * emb[1]" passing. However, once a training step is taken (the line following the assert), a strange error appears suggesting an issue with the TensorArray written to during the while loop.
tensorflow.python.framework.errors.InvalidArgumentError: TensorArray TensorArray@gradients: Could not read from TensorArray index 2 because it has not yet been written to.
  [[Node: gradients/while/TensorArrayWrite_grad/TensorArrayRead = TensorArrayRead[_class=["loc:@TensorArray"], dtype=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](gradients/while/TensorArrayWrite_grad/TensorArrayGrad/TensorArrayGrad, gradients/while/TensorArrayWrite_grad/TensorArrayRead/StackPop, gradients/while/TensorArrayWrite_grad/TensorArrayGrad/gradient_flow)]]

Caused by op u'gradients/while/TensorArrayWrite_grad/TensorArrayRead', defined at:
  File "minimal.py", line 82, in <module>
    model = TreeRNN(8, 1, 1, degree=2)
  File "minimal.py", line 61, in __init__
    self.grad = tf.gradients(self.loss, self.params)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/gradients.py", line 481, in gradients
    in_grads = _AsList(grad_fn(op, *out_grads))
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/tensor_array_grad.py", line 115, in _TensorArrayWriteGrad
    grad = g.read(index)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/tensor_array_ops.py", line 177, in read
    dtype=self._dtype, name=name)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 781, in _tensor_array_read
    flow_in=flow_in, dtype=dtype, name=name)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/op_def_library.py", line 694, in apply_op
    op_def=op_def)
  File "/Library/Python/2.7/site-packages/tensorflow/python/framework/ops.py", line 2154, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/Library/Python/2.7/site-packages/tensorflow/python/framework/ops.py", line 1154, in __init__
    self._traceback = _extract_stack()

...which was originally created as op u'while/TensorArrayWrite', defined at:
  File "minimal.py", line 82, in <module>
    model = TreeRNN(8, 1, 1, degree=2)
  File "minimal.py", line 50, in __init__
    loop_vars=(self.time, node_emb, tf.zeros([1])))
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 1681, in While
    back_prop=back_prop, swap_memory=swap_memory, name=name)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 1671, in while_loop
    result = context.BuildLoop(cond, body, loop_vars)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 1572, in BuildLoop
    body_result = body(*vars_for_body_with_tensor_arrays)
  File "minimal.py", line 43, in _recurrence
    new_node_emb = node_emb.write(children_and_parent[-1], parent_emb)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/tensor_array_ops.py", line 200, in write
    name=name)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 875, in _tensor_array_write
    value=value, flow_in=flow_in, name=name)
  File "/Library/Python/2.7/site-packages/tensorflow/python/ops/op_def_library.py", line 694, in apply_op
    op_def=op_def)
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
class TreeRNN(object):
    def __init__(self, num_emb, emb_dim, output_dim, degree=2, learning_rate=0.01):
        self.num_emb = num_emb
        self.emb_dim = emb_dim
        self.output_dim = output_dim
        self.degree = degree
        self.learning_rate = tf.Variable(float(learning_rate), trainable=False)

        self.embeddings = tf.Variable(self.init_matrix([self.num_emb, self.emb_dim]))
        self.recursive_unit = self.create_recursive_unit()
        self.W_out = tf.Variable(self.init_matrix([self.output_dim, self.emb_dim]))
        self.b_out = tf.Variable(self.init_vector([self.output_dim]))

        self.x = tf.placeholder(tf.int32, shape=[None])  # word indices
        self.tree = tf.placeholder(tf.int32, shape=[None, self.degree + 1])
        self.y = tf.placeholder(tf.float32, shape=[self.output_dim])

        num_words, = tf.unpack(tf.shape(self.x), 1)  # also num leaves
        emb_x = tf.gather(self.embeddings, self.x)
        node_emb = tensor_array_ops.TensorArray(
            dtype=tf.float32, size=num_words - 1, dynamic_size=True,
            clear_after_read=False)
        node_emb = node_emb.unpack(emb_x)

        num_nodes, _ = tf.unpack(tf.shape(self.tree), 2)  # num internal nodes
        tree_traversal = tensor_array_ops.TensorArray(
            dtype=tf.int32, size=num_nodes)
        tree_traversal = tree_traversal.unpack(self.tree)

        def _recurrence(t, node_emb, _):
            node_info = tree_traversal.read(t)
            children_and_parent = tf.unpack(node_info, self.degree + 1)
            child_emb = []
            for i in xrange(self.degree):
                child_emb.append(node_emb.read(children_and_parent[i]))
            parent_emb = self.recursive_unit(child_emb)
            new_node_emb = node_emb.write(children_and_parent[-1], parent_emb)
            return t + 1, new_node_emb, parent_emb

        self.time = tf.constant(0, dtype=tf.int32, name='time')
        _, _, final_emb = control_flow_ops.While(
            cond=lambda t, _1, _2: t < num_nodes,
            body=_recurrence,
            loop_vars=(self.time, node_emb, tf.zeros([1])))
        self.final_state = final_emb

        self.pred_y = self.activation(
            tf.matmul(self.W_out, tf.reshape(self.final_state, [self.emb_dim, 1]))
            + self.b_out)
        self.loss = self.loss_fn(self.y, self.pred_y)

        self.params = tf.trainable_variables()
        opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.grad = tf.gradients(self.loss, self.params)
        self.updates = opt.apply_gradients(zip(self.grad, self.params))

    def init_matrix(self, shape):
        return tf.random_normal(shape, stddev=0.1)

    def init_vector(self, shape):
        return tf.zeros(shape)

    def create_recursive_unit(self):
        def unit(child_emb):  # very simple
            return 1 + child_emb[0] * child_emb[1]
        return unit

    def activation(self, inp):
        return tf.sigmoid(inp)

    def loss_fn(self, y, pred_y):
        return tf.reduce_sum(tf.square(y - pred_y))


model = TreeRNN(8, 1, 1, degree=2)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

root_emb = sess.run([model.final_state],
                    feed_dict={model.x: np.array([0, 1]), model.tree: np.array([[0, 1, 2]])})
emb, = sess.run([model.embeddings])
assert root_emb == 1 + emb[0] * emb[1]

out = sess.run([model.updates, model.loss],
               feed_dict={model.x: np.array([0, 1]),
                          model.tree: np.array([[0, 1, 2]]),
                          model.y: np.array([0])})
Set parallel_iterations=1 in tf.while_loop.
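Concretely, a sketch against the tf.while_loop signature (the older control_flow_ops.While call used in the question should accept the same keyword), applied to the loop in the constructor:
# Forcing sequential iterations makes every TensorArray write happen
# before the backward pass tries to read the corresponding gradient slot.
_, _, final_emb = tf.while_loop(
    cond=lambda t, _1, _2: t < num_nodes,
    body=_recurrence,
    loop_vars=(self.time, node_emb, tf.zeros([1])),
    parallel_iterations=1)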