Variable assignment in TensorFlow gives error

If I have a placeholder:
placeholder = tf.placeholder(dtype=np.float32, shape=[1, 2])
And then I create an op which assigns the placeholder to a new variable y:
y = tf.Variable([[0, 0]], dtype=tf.float32)
y_op = tf.assign(y, placeholder)
Then I specify a value which I will feed into this placeholder:
x = tf.Variable([[5, 5]], dtype=np.float32)
And finally run the operation:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(y_op, feed_dict={placeholder: x})
I get the following error:
ValueError: setting an array element with a sequence.
Why is this? From what I can see, the shapes of placeholder, y, and x, are all [1, 2].

You are trying to feed a graph variable (x) through the feed dict. feed_dict and placeholders are for feeding external values, such as NumPy arrays, into the graph. This code works:
import numpy as np
import tensorflow as tf

placeholder = tf.placeholder(dtype=np.float32, shape=[1, 2])
y = tf.Variable([[0, 0]], dtype=tf.float32)
y_op = tf.assign(y, placeholder)
value = np.array([[5,5]])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
r = sess.run(y_op, feed_dict={placeholder: value})
But if the value you want to use in the graph is already a tf.Variable, there is no reason to use feed dict at all. This also works:
x = tf.Variable([[5, 5]], dtype=tf.float32)
y = tf.Variable([[0, 0]], dtype=tf.float32)
y_op = tf.assign(y, x)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
r = sess.run(y_op)
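If you really do need to push a variable's current value through the feed dict, one option (a minimal sketch, my own addition, not from the original answer) is to evaluate the variable to a NumPy array first and feed that array:
import numpy as np
import tensorflow as tf

placeholder = tf.placeholder(dtype=tf.float32, shape=[1, 2])
x = tf.Variable([[5, 5]], dtype=tf.float32)
y = tf.Variable([[0, 0]], dtype=tf.float32)
y_op = tf.assign(y, placeholder)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
x_value = sess.run(x)  # materialize the variable as a NumPy array
r = sess.run(y_op, feed_dict={placeholder: x_value})  # now the feed is an external value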

Related

How to update a variable in tf.variable_scope using the variable's name?

I want to modify the value of the variable 'weight1' inside tf.variable_scope.
I tried to modify the value from another function, but it does not work for me.
def inference(q, reuse=False):
    with tf.variable_scope('layer1', reuse=reuse):
        x = tf.get_variable('weight1', [1, 3], initializer=tf.truncated_normal_initializer(stddev=0.1))
        y = tf.get_variable('weight2', [3, 1], initializer=tf.constant_initializer([[1], [2], [3]]))
        return tf.matmul(x, y)

def update_process(reuse=True):
    with tf.variable_scope('layer1', reuse=reuse):
        x = tf.get_variable('weight1', [1, 3])
        update = tf.assign(x, x - 1)
        with tf.Session() as sess:
            sess.run(init)
            print(sess.run(x))

init = tf.global_variables_initializer()
z = inference(1)
with tf.Session() as sess:
    sess.run(init)
    for i in range(5):
        update_process(reuse=True)
        print(sess.run(z))
        print('\n')
I want this code to print a different result for sess.run(z) on each iteration, but the value is always the same.
You need to run sess.run(update) inside update_process in the same session that runs the inference part of the graph:
import tensorflow as tf

def inference(q, reuse=False):
    with tf.variable_scope('layer1', reuse=reuse):
        x = tf.get_variable('weight1', [1, 3], initializer=tf.truncated_normal_initializer(stddev=0.1))
        y = tf.get_variable('weight2', [3, 1], initializer=tf.constant_initializer([[1], [2], [3]]))
        return tf.matmul(x, y)

def update_process(reuse=True):
    with tf.variable_scope('layer1', reuse=reuse):
        x = tf.get_variable('weight1', [1, 3])
        update = tf.assign(x, x - 1)
        print(sess.run(update))

z = inference(1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(5):
        update_process(reuse=True)
        print(sess.run(z))
        print('\n')
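A small variant (my own sketch, not part of the original answer): passing the session into update_process explicitly avoids relying on sess being visible as a module-level name. Note that each call still adds a new assign op to the graph, which is fine for a toy example but wasteful in a long-running loop:
def update_process(sess, reuse=True):
    with tf.variable_scope('layer1', reuse=reuse):
        x = tf.get_variable('weight1', [1, 3])
    update = tf.assign(x, x - 1)  # creates a new assign op on every call
    print(sess.run(update))

z = inference(1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(5):
        update_process(sess, reuse=True)
        print(sess.run(z))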

How to use Dataset to feed an array of data to inference with TensorFlow?

I am new to the TensorFlow Dataset API and do not yet fully understand how it is meant to be used, so I need some help.
Here is a simple example:
import tensorflow as tf

x = tf.placeholder(tf.int32, shape=[])
y = tf.square(x)
with tf.Session() as sess:
    print(sess.run(y, {x: 2}))
    # result is 4, simple
If I have an integer array arr_x = [2, 3, 5, 8, 10], how can I use the Dataset API to iterate over the array?
I am trying:
p = tf.placeholder(tf.int32, shape=[None])
d = tf.data.Dataset.from_tensor_slices(p)
d = d.map(lambda x: x)
iter = d.make_initializable_iterator()
next_element = iter.get_next()
with tf.Session() as sess:
    sess.run(iter.initializer, feed_dict={p: [2, 3, 4]})
    while True:
        try:
            print(sess.run(y, next_element))
        except tf.errors.OutOfRangeError:
            break
But no luck, any idea?
What about:
import numpy as np
import tensorflow as tf

arr_x = np.array([2, 3, 5, 8, 10])
arr_y = np.array([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]])
dataset = tf.data.Dataset.from_tensor_slices((arr_x, arr_y))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()

sess = tf.Session()
while True:
    try:
        print(sess.run(next_element))
    except tf.errors.OutOfRangeError:
        break
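If the goal from the question is to run the original y = tf.square(x) computation over the array, one way (a sketch under the same TF 1.x API, my own addition) is to build the computation directly on the iterator's output instead of on a separate placeholder:
import numpy as np
import tensorflow as tf

arr_x = np.array([2, 3, 5, 8, 10])
dataset = tf.data.Dataset.from_tensor_slices(arr_x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()  # scalar tensor holding the current array element
y = tf.square(next_element)         # the computation is built on the iterator output

with tf.Session() as sess:
    while True:
        try:
            print(sess.run(y))      # prints 4, 9, 25, 64, 100
        except tf.errors.OutOfRangeError:
            break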

Difference between SparseTensor and SparseTensorValue

What is the difference between SparseTensor and SparseTensorValue? Is there anything I should keep in mind if I want to build the sparse tensor based on fed indices and values? I could only find a few toy examples.
It depends on where you define your Sparse Tensor.
If you want to define the tensor outside the graph, e.g. a sparse tensor whose data will be fed in later, use SparseTensorValue. In contrast, if the sparse tensor is defined in the graph, use SparseTensor.
Sample code for tf.SparseTensorValue:
x_sp = tf.sparse_placeholder(dtype=tf.float32)
W = tf.Variable(tf.random_normal([6, 6]))
y = tf.sparse_tensor_dense_matmul(sp_a=x_sp, b=W)
init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)
stv = tf.SparseTensorValue(indices=[[0, 0], [1, 2]], values=[1.1, 1.2],
                           dense_shape=[2, 6])
result = sess.run(y,feed_dict={x_sp:stv})
print(result)
Sample code for tf.SparseTensor:
indices_i = tf.placeholder(dtype=tf.int64, shape=[2, 2])
values_i = tf.placeholder(dtype=tf.float32, shape=[2])
dense_shape_i = tf.placeholder(dtype=tf.int64, shape=[2])
st = tf.SparseTensor(indices=indices_i, values=values_i, dense_shape=dense_shape_i)
W = tf.Variable(tf.random_normal([6, 6]))
y = tf.sparse_tensor_dense_matmul(sp_a=st, b=W)
init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)
result = sess.run(y,feed_dict={indices_i:[[0, 0], [1, 2]], values_i:[1.1, 1.2], dense_shape_i:[2,6]})
print(result)
Hope this helps.
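As a side note (my own addition, not from the original answer): if the indices and values are already known when the graph is built, a SparseTensor can be constructed directly from Python constants, and no placeholders or feed_dict are needed at all:
st = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.1, 1.2], dense_shape=[2, 6])
W = tf.Variable(tf.random_normal([6, 6]))
y = tf.sparse_tensor_dense_matmul(sp_a=st, b=W)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(y))  # no feed_dict: everything is defined inside the graph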

Why does the result change when I change the test batch size in TensorFlow?

Here is my train code:
x = tf.placeholder(tf.float32, [None, 2, 3])
cell = tf.nn.rnn_cell.GRUCell(10)
_, state = tf.nn.dynamic_rnn(
    cell=cell,
    inputs=x,
    dtype=tf.float32)

# train
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x_ = np.ones([2, 2, 3], np.float32)
    output = sess.run(state, feed_dict={x: x_})
    print output
    saver = tf.train.Saver()
    saver.save(sess, './model')
The result is:
[[ 0.12851571 -0.23994535 0.23123585 -0.00047993 -0.02450397
-0.21048039 -0.18786618 0.04458345 -0.08603278 -0.08259721]
[ 0.12851571 -0.23994535 0.23123585 -0.00047993 -0.02450397
-0.21048039 -0.18786618 0.04458345 -0.08603278 -0.08259721]]
Here is my test code:
x = tf.placeholder(tf.float32, [None, 2, 3])
cell = tf.nn.rnn_cell.GRUCell(10)
_, state = tf.nn.dynamic_rnn(
    cell=cell,
    inputs=x,
    dtype=tf.float32)

with tf.Session() as sess:
    x_ = np.ones([1, 2, 3], np.float32)
    saver = tf.train.Saver()
    saver.restore(sess, './model')
    output = sess.run(state, feed_dict={x: x_})
    print output
Then I get:
[[ 0.12851571 -0.23994535 0.2312358 -0.00047993 -0.02450397
-0.21048039 -0.18786621 0.04458345 -0.08603278 -0.08259721]]
As you can see, the result has changed slightly. When I set the test batch size to 2, the result is the same as the training result. So what's wrong? My TF version is 0.12.
An update (not an answer)
The tf.nn.rnn_cell.GRUCell and tf.nn.dynamic_rnn are both deprecated and have been replaced by tf.keras.layers.GRU.
Using the deprecated functions, it appears you don't even need to save and restore the model, or even run it multiple times. All you need to do is run it with an odd batch size and tf.float32 as the dtype, and the last row will be slightly off.
import tensorflow as tf
import numpy as np
x = tf.placeholder(tf.float32, [None, 2, 3])
cell = tf.nn.rnn_cell.GRUCell(10)
_, state = tf.nn.dynamic_rnn(
    cell=cell,
    inputs=x,
    dtype=tf.float32)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
x_ = np.ones([3, 2, 3], np.float32)
output = sess.run(state, feed_dict={x: x_})
print(output)
Returns results like this
[[ 0.03649516 -0.08052824 -0.0539998 0.2995336 -0.12542574 -0.04339318
0.3872745 0.08844283 -0.14555818 -0.4216033 ]
[ 0.03649516 -0.08052824 -0.0539998 0.2995336 -0.12542574 -0.04339318
0.3872745 0.08844283 -0.14555818 -0.4216033 ]
[ 0.03649516 -0.08052824 -0.05399981 0.2995336 -0.12542574 -0.04339318
0.38727456 0.08844285 -0.14555818 -0.4216033 ]]
The anomaly only seems to appear in the last row for odd length batches.
An alternative view is that a single batch is correct, all even-sized batches are off, and everything other than the last row of odd-sized batches is off.
It does not seem to happen for dtype=float64 or dtype=float16, both of which seem stable.
Furthermore, this issue is only in the hidden state and does not seem to appear in the regular output.
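To sanity-check the dtype observation above, here is a small sketch (my own) that only switches the dtype to tf.float64; with it, all three rows of the state should match exactly:
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float64, [None, 2, 3])
cell = tf.nn.rnn_cell.GRUCell(10)
_, state = tf.nn.dynamic_rnn(cell=cell, inputs=x, dtype=tf.float64)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
x_ = np.ones([3, 2, 3], np.float64)
print(sess.run(state, feed_dict={x: x_}))  # identical rows expected with float64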

InvalidArgumentError when running tf.global_variables_initializer()

Basically, I have a function that expects a tensor x and two placeholders z and c.
def error_robust(x, z, c):
    zz = tf.reshape(z, [-1, 28, 28, 1])
    var = tf.reduce_mean(x - zz)
    out = tf.cond(tf.abs(var) <= c,
                  lambda: (c*c/6.0) * (1 - tf.pow(1 - tf.pow(var/c, 2), 3)),
                  lambda: tf.Variable(c*c/6.0))
    return out
I define the placeholders and tensors that I am going to use:
# TENSORFLOW PLACEHOLDERS
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
flat_mnist_data = tf.placeholder(tf.float32, [None, 28*28])
dropout_keep_prob = tf.placeholder(tf.float32)
param_robust = tf.placeholder(tf.float32, shape=())
Calling the defined function does not generate any errors:
error_r = error_robust(layer1_b.reconstruction, flat_mnist_data, param_robust)
This generates an error:
sess.run(tf.global_variables_initializer())
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
I don't really understand why it happens. Any ideas on how to solve this one?
OK, I got it. I was originally expecting c to be a simple scalar, so I had wrapped one of the branches of tf.cond in tf.Variable. Since c is actually a placeholder, that variable's initializer depends on c, and tf.global_variables_initializer() tries to evaluate it without feeding c, which raises the error.
Updating the error_robust function solves it:
def error_robust(x, z, c):
    zz = tf.reshape(z, [-1, 28, 28, 1])
    var = tf.reduce_mean(x - zz)
    out = tf.cond(tf.abs(var) <= c,
                  lambda: (c*c/6.0) * (1 - tf.pow(1 - tf.pow(var/c, 2), 3)),
                  lambda: c*c/6.0)
    return out
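For reference, a minimal reproduction sketch (my own, for illustration) of the underlying problem: a tf.Variable whose initial value depends on a placeholder cannot be initialized by tf.global_variables_initializer() unless that placeholder is fed:
import tensorflow as tf

c = tf.placeholder(tf.float32, shape=())
v = tf.Variable(c * c / 6.0)  # the initializer depends on the placeholder c

sess = tf.Session()
# sess.run(tf.global_variables_initializer())                    # raises InvalidArgumentError
sess.run(tf.global_variables_initializer(), feed_dict={c: 3.0})  # works once c is fed
print(sess.run(v))  # 1.5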