Given a 2D tensor
T = [[1, 2, 3],
     [4, 5, 6]]
and a 1D tensor containing horizontal shifts, say, s = [0, -2, 1], how can I obtain the following 3D tensor R?
R[0] = T
R[1] = [[3, 0, 0],   # shifted two to the left,
        [6, 0, 0]]   # padding the rest with zeros
R[2] = [[0, 1, 2],   # shifted one to the right,
        [0, 4, 5]]   # padding the rest with zeros
I know about tf.contrib.image.translate, but that isn't differentiable, so I am looking for some elegant combination of padding/slicing/looping/concatenating operations that accomplishes the same thing.
I have only come up with two ways, both using tf.map_fn(). The first method is to pad T with zeros and slice it.
import tensorflow as tf

T = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32)
s = tf.constant([0, -2, 1])

left = tf.reduce_max(s)
right = tf.reduce_min(s)
left_mask = tf.zeros(shape=(tf.shape(T)[0], left))
right_mask = tf.zeros(shape=(tf.shape(T)[0], tf.abs(right)))
tmp_slice = tf.concat([left_mask, T, right_mask], axis=-1)
result = tf.map_fn(lambda x: tmp_slice[:, left - x:left - x + tf.shape(T)[1]], s, dtype=T.dtype)
grads = tf.gradients(ys=result, xs=T)

with tf.Session() as sess:
    print(sess.run(result))
    print(sess.run(grads))
# print
[[[1. 2. 3.]
  [4. 5. 6.]]

 [[3. 0. 0.]
  [6. 0. 0.]]

 [[0. 1. 2.]
  [0. 4. 5.]]]
[array([[2., 2., 2.],
        [2., 2., 2.]], dtype=float32)]
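One caveat about the padding widths (my note, not part of the original question): if every shift in s is negative, left = tf.reduce_max(s) becomes negative and tf.zeros fails. Clamping both bounds keeps the padding sizes valid for any sign pattern:
left = tf.maximum(tf.reduce_max(s), 0)
right = tf.minimum(tf.reduce_min(s), 0)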
The second method is to compute a corresponding mask matrix with tf.sequence_mask() and tf.roll(), then take the values with tf.where().
import tensorflow as tf

T = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32)
s = tf.constant([0, -2, 1])

def mask_f(x):
    indices = tf.tile([x], (tf.shape(T)[0],))
    mask = tf.sequence_mask(tf.shape(T)[1] - tf.abs(indices), tf.shape(T)[1])
    mask = tf.roll(mask, shift=tf.maximum(0, x), axis=-1)
    return tf.where(mask, tf.roll(T, shift=x, axis=-1), tf.zeros_like(T))

result = tf.map_fn(lambda x: mask_f(x), s, dtype=T.dtype)
grads = tf.gradients(ys=result, xs=T)

with tf.Session() as sess:
    print(sess.run(result))
    print(sess.run(grads))
# print
[[[1. 2. 3.]
  [4. 5. 6.]]

 [[3. 0. 0.]
  [6. 0. 0.]]

 [[0. 1. 2.]
  [0. 4. 5.]]]
[array([[2., 2., 2.],
        [2., 2., 2.]], dtype=float32)]
Update
I found a new method to achieve this. In essence, a horizontal shift of T is just T multiplied by an offset identity matrix, so we can use np.eye() to create that factor.
import tensorflow as tf
import numpy as np

T = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32)
s = tf.constant([0, -2, 1])

new_T = tf.tile(tf.expand_dims(T, axis=0), [tf.shape(s)[0], 1, 1])
s_factor = tf.map_fn(
    lambda x: tf.py_func(lambda y: np.eye(T.get_shape().as_list()[-1], k=y), [x], tf.float64),
    s, tf.float64)
result = tf.matmul(new_T, tf.cast(s_factor, new_T.dtype))
grads = tf.gradients(ys=result, xs=T)

with tf.Session() as sess:
    print(sess.run(result))
    print(sess.run(grads))
# print
[[[1. 2. 3.]
  [4. 5. 6.]]

 [[3. 0. 0.]
  [6. 0. 0.]]

 [[0. 1. 2.]
  [0. 4. 5.]]]
[array([[2., 2., 2.],
        [2., 2., 2.]], dtype=float32)]
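If one wants to stay entirely on the graph and avoid tf.py_func, the same offset identity matrices can be built from regular ops. A minimal sketch of that idea (my addition; it relies on tf.one_hot producing an all-zero row for out-of-range indices):
import tensorflow as tf

T = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32)
s = tf.constant([0, -2, 1])
n = T.get_shape().as_list()[-1]

def shift_matrix(k):
    # row i of np.eye(n, k=k) has its 1 at column i + k;
    # indices outside [0, n) give an all-zero row, i.e. the zero padding
    return tf.one_hot(tf.range(n) + k, depth=n, dtype=T.dtype)

s_factor = tf.map_fn(shift_matrix, s, dtype=T.dtype)
result = tf.matmul(tf.tile(tf.expand_dims(T, axis=0), [tf.shape(s)[0], 1, 1]), s_factor)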
Related
I want to shift a tensor along a given axis. It's easy to do this in pandas or NumPy, like this:
import numpy as np
import pandas as pd
data = np.arange(0, 6).reshape(-1, 2)
pd.DataFrame(data).shift(1).fillna(0).values
Output is:
array([[0., 0.],
       [0., 1.],
       [2., 3.]])
But in TensorFlow, the closest solution I found is tf.roll, which wraps the last row around to the first row (I don't want that). So I have to use something like tf.roll + tf.slice (remove the last row) + tf.concat (prepend a row of tf.zeros), which is really ugly.
Is there a better way to handle shifts in TensorFlow or Keras?
Thanks.
I think I found a better way to solve this problem: use tf.roll, then apply tf.math.multiply with a mask to set the first row to zeros.
Sample code is as follows:
Original tensor:
A = tf.cast(tf.reshape(tf.range(27), (-1, 3, 3)), dtype=tf.float32)
A
Output:
<tf.Tensor: id=117, shape=(3, 3, 3), dtype=float32, numpy=
array([[[ 0.,  1.,  2.],
        [ 3.,  4.,  5.],
        [ 6.,  7.,  8.]],

       [[ 9., 10., 11.],
        [12., 13., 14.],
        [15., 16., 17.]],

       [[18., 19., 20.],
        [21., 22., 23.],
        [24., 25., 26.]]], dtype=float32)>
Shift (like pd.shift):
B = tf.concat((tf.zeros((1, 3)), tf.ones((2, 3))), axis=0)
C = tf.expand_dims(B, axis=0)
tf.math.multiply(tf.roll(A, 1, axis=1), C)
Output:
<tf.Tensor: id=128, shape=(3, 3, 3), dtype=float32, numpy=
array([[[ 0.,  0.,  0.],
        [ 0.,  1.,  2.],
        [ 3.,  4.,  5.]],

       [[ 0.,  0.,  0.],
        [ 9., 10., 11.],
        [12., 13., 14.]],

       [[ 0.,  0.,  0.],
        [18., 19., 20.],
        [21., 22., 23.]]], dtype=float32)>
Try this:
import tensorflow as tf
input = tf.constant([[0, 1, 3], [4, 5, 6], [7, 8, 9]])
shifted_0dim = input[1:]
shifted_1dim = input[:, 1:]
shifted2 = input[2:]
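Note that pure slicing drops the shifted-out rows or columns instead of padding with zeros; if pd.shift-style zero padding is wanted, the slices can be combined with tf.concat. A small sketch along those lines (my addition, not part of the original answer):
# shift down by one along axis 0, filling the freed row with zeros
shifted_down = tf.concat([tf.zeros_like(input[:1]), input[:-1]], axis=0)
# shift left by one along axis 1, filling the freed column with zeros
shifted_left = tf.concat([input[:, 1:], tf.zeros_like(input[:, :1])], axis=1)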
Generalizing the accepted answer to arbitrary tensor shapes, desired shift, and axis to shift:
import tensorflow as tf

def tf_shift(tensor, shift=1, axis=0):
    dim = len(tensor.shape)
    if axis > dim:
        raise ValueError(
            f'Value of axis ({axis}) must be <= number of tensor axes ({dim})'
        )
    mask_dim = dim - axis
    mask_shape = tensor.shape[-mask_dim:]
    zero_dim = min(shift, mask_shape[0])
    mask = tf.concat(
        [tf.zeros(tf.TensorShape(zero_dim) + mask_shape[1:]),
         tf.ones(tf.TensorShape(mask_shape[0] - zero_dim) + mask_shape[1:])],
        axis=0
    )
    for i in range(dim - mask_dim):
        mask = tf.expand_dims(mask, axis=0)
    return tf.multiply(
        tf.roll(tensor, shift, axis),
        mask
    )
EDIT:
The code above doesn't allow for negative shift values and is pretty slow. Here is a more efficient version using tf.slice and tf.concat, without creating a mask and multiplying the tensor of interest by it.
import tensorflow as tf

def tf_shift(values: tf.Tensor, shift: int = 1, axis: int = 0):
    pad = tf.zeros([val if i != axis else abs(shift) for i, val in enumerate(values.shape)],
                   dtype=values.dtype)
    size = [-1 if i != axis else val - abs(shift) for i, val in enumerate(values.shape)]
    if shift > 0:
        shifted = tf.concat(
            [pad, tf.slice(values, [0] * len(values.shape), size)],
            axis=axis
        )
    elif shift < 0:
        shifted = tf.concat(
            [tf.slice(values,
                      [0 if i != axis else abs(shift) for i, _ in enumerate(values.shape)],
                      size),
             pad],
            axis=axis
        )
    else:
        shifted = values
    return shifted
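A quick usage sketch of the function above (my example, assuming TensorFlow 2.x eager execution so the results print directly):
x = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))
print(tf_shift(x, shift=1, axis=0))   # first row becomes zeros
print(tf_shift(x, shift=-2, axis=1))  # last two columns become zeros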
Assuming a 2D tensor, this function should mimic a DataFrame shift:
def shift_tensor(tensor, periods, fill_value):
    num_row = len(tensor)
    num_col = len(tensor[0])
    pad = tf.fill([abs(periods), num_col], fill_value)
    if periods > 0:
        # shift down: pad on top, drop the last `periods` rows
        shifted_tensor = tf.concat((pad, tensor[:(num_row - periods), :]), axis=0)
    else:
        # shift up (or periods == 0): drop the first |periods| rows, pad at the bottom
        shifted_tensor = tf.concat((tensor[-periods:, :], pad), axis=0)
    return shifted_tensor
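A quick check of the function (my example, assuming eager execution so that len() works on the tensor):
t = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
print(shift_tensor(t, 1, 0.))   # [[0., 0.], [1., 2.], [3., 4.]]
print(shift_tensor(t, -1, 0.))  # [[3., 4.], [5., 6.], [0., 0.]]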
I tried to update the diagonal of a TensorFlow weight matrix using scatter_update, but with no luck so far. It either raised a shape-mismatch error or only updated along the first dimension. The API behaviour seems very bizarre. Could anyone help me out? Thanks.
Example:
import numpy as np
import tensorflow as tf

dia_mx = tf.Variable(initial_value=np.array([[1., 0., 0.],
                                              [0., 1., 0.],
                                              [0., 0., 1.]]))
new_diagonal_values = np.array([2., 3., 4.])
tf.scatter_update(dia_mx, [[0, 0], [1, 1], [2, 2]], new_diagonal_values)
Get error:
InvalidArgumentError: shape of indices ([3,2]) is not compatible with the shape of updates ([3]) [Op:ResourceScatterUpdate]
Expected new diagonal matrix:
dia_mx = [[2., 0., 0.],
          [0., 3., 0.],
          [0., 0., 4.]]
To update specific indices in a tensor use tf.scatter_nd_update():
import tensorflow as tf
import numpy as np

dia_mx = tf.Variable(initial_value=np.array([[1., 0., 0.],
                                              [0., 1., 0.],
                                              [0., 0., 1.]]))
updates = [tf.constant(2.), tf.constant(3.), tf.constant(4.)]
indices = tf.constant([[0, 0], [1, 1], [2, 2]])
update_tensor = tf.scatter_nd_update(dia_mx, indices, updates)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(update_tensor.eval())
    # [[2. 0. 0.]
    #  [0. 3. 0.]
    #  [0. 0. 4.]]
tf.scatter_update() applies updates along the first dimension of a tensor. In this particular case that means updates are applied to whole rows of the matrix at once:
dia_mx = tf.Variable(initial_value=np.array([[1., 0., 0.],
                                              [0., 1., 0.],
                                              [0., 0., 1.]]), dtype=tf.float32)
updates = tf.constant([[2., 0., 0.], [0., 3., 0.], [0., 0., 4.]], dtype=tf.float32)
indices = tf.constant([0, 1, 2])
update_tensor = tf.scatter_update(dia_mx, indices, updates)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(update_tensor.eval())
    # [[2. 0. 0.]
    #  [0. 3. 0.]
    #  [0. 0. 4.]]
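For the specific case of replacing a diagonal, tf.matrix_set_diag (tf.linalg.set_diag in newer releases) is also worth noting; a small sketch of that alternative (my addition, it returns a new tensor rather than updating the variable in place):
new_diag = tf.constant([2., 3., 4.], dtype=tf.float32)  # match dia_mx's dtype above
dia_mx_set = tf.matrix_set_diag(dia_mx, new_diag)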
I want to use L2 regularization with dynamic_rnn in TensorFlow, but it seems this is not handled gracefully at the moment; the while loop is the source of the error. Below is a sample code snippet to reproduce the problem:
import numpy as np
import tensorflow as tf

tf.reset_default_graph()
batch = 2
dim = 3
hidden = 4

with tf.variable_scope('test', regularizer=tf.contrib.layers.l2_regularizer(0.001)):
    lengths = tf.placeholder(dtype=tf.int32, shape=[batch])
    inputs = tf.placeholder(dtype=tf.float32, shape=[batch, None, dim])
    cell = tf.nn.rnn_cell.GRUCell(hidden)
    cell_state = cell.zero_state(batch, tf.float32)
    output, _ = tf.nn.dynamic_rnn(cell, inputs, lengths, initial_state=cell_state)

inputs_ = np.asarray([[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3]],
                      [[6, 6, 6], [7, 7, 7], [8, 8, 8], [9, 9, 9]]],
                     dtype=np.int32)
lengths_ = np.asarray([3, 1], dtype=np.int32)

this_throws_error = tf.losses.get_regularization_loss()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output_ = sess.run(output, {inputs: inputs_, lengths: lengths_})
    print(output_)
INFO:tensorflow:Cannot use 'test/rnn/gru_cell/gates/kernel/Regularizer/l2_regularizer' as input to 'total_regularization_loss' because 'test/rnn/gru_cell/gates/kernel/Regularizer/l2_regularizer' is in a while loop.
total_regularization_loss while context: None
test/rnn/gru_cell/gates/kernel/Regularizer/l2_regularizer while context: test/rnn/while/while_context
How can I add L2 regularization if I have dynamic_rnn in my network? Currently I could go ahead and collect the trainable variables at the loss calculation and add an L2 loss over them there, but I also have word vectors as trainable parameters, which I don't want to regularize.
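For reference, a minimal sketch of that manual workaround (my addition; the 'embedding' name filter is an assumption used only for illustration):
l2_scale = 0.001
reg_loss = l2_scale * tf.add_n(
    [tf.nn.l2_loss(v) for v in tf.trainable_variables()
     if 'embedding' not in v.name])  # skip the word vectors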
I've encountered the same issue, and I've been trying to solve it with tensorflow==1.9.0.
Code:
import numpy as np
import tensorflow as tf

tf.reset_default_graph()
batch = 2
dim = 3
hidden = 4

with tf.variable_scope('test', regularizer=tf.contrib.layers.l2_regularizer(0.001)):
    lengths = tf.placeholder(dtype=tf.int32, shape=[batch])
    inputs = tf.placeholder(dtype=tf.float32, shape=[batch, None, dim])
    cell = tf.nn.rnn_cell.GRUCell(hidden)
    cell_state = cell.zero_state(batch, tf.float32)
    output, _ = tf.nn.dynamic_rnn(cell, inputs, lengths, initial_state=cell_state)

inputs_ = np.asarray([[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3]],
                      [[6, 6, 6], [7, 7, 7], [8, 8, 8], [9, 9, 9]]],
                     dtype=np.int32)
lengths_ = np.asarray([3, 1], dtype=np.int32)

this_throws_error = tf.losses.get_regularization_loss()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output_ = sess.run(output, {inputs: inputs_, lengths: lengths_})
    print(output_)
    print(sess.run(this_throws_error))
This is the result of running the code:
...
File "/Users/piero/Development/mlenv3/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_util.py", line 314, in CheckInputFromValidContext
raise ValueError(error_msg + " See info log for more details.")
ValueError: Cannot use 'test/rnn/gru_cell/gates/kernel/Regularizer/l2_regularizer' as input to 'total_regularization_loss' because 'test/rnn/gru_cell/gates/kernel/Regularizer/l2_regularizer' is in a while loop. See info log for more details.
Then I tried to put the dynamic_rnn call outside of the variable scope:
import numpy as np
import tensorflow as tf

tf.reset_default_graph()
batch = 2
dim = 3
hidden = 4

with tf.variable_scope('test', regularizer=tf.contrib.layers.l2_regularizer(0.001)):
    lengths = tf.placeholder(dtype=tf.int32, shape=[batch])
    inputs = tf.placeholder(dtype=tf.float32, shape=[batch, None, dim])
    cell = tf.nn.rnn_cell.GRUCell(hidden)
    cell_state = cell.zero_state(batch, tf.float32)

output, _ = tf.nn.dynamic_rnn(cell, inputs, lengths, initial_state=cell_state)

inputs_ = np.asarray([[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3]],
                      [[6, 6, 6], [7, 7, 7], [8, 8, 8], [9, 9, 9]]],
                     dtype=np.int32)
lengths_ = np.asarray([3, 1], dtype=np.int32)

this_throws_error = tf.losses.get_regularization_loss()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output_ = sess.run(output, {inputs: inputs_, lengths: lengths_})
    print(output_)
    print(sess.run(this_throws_error))
In theory this should be fine, as the regularization applies to the weights of the RNN, which should be variables created when the RNN cell is set up.
This is the output:
[[[ 0.          0.          0.          0.        ]
  [ 0.1526176   0.33048663 -0.02288104 -0.1016309 ]
  [ 0.24402776  0.68280864 -0.04888818 -0.26671126]
  [ 0.          0.          0.          0.        ]]

 [[ 0.01998052  0.82368904 -0.00891946 -0.38874635]
  [ 0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.        ]]]
0.0
So placing the dynamic_rnn call outside the variable scope works, in the sense that it does not raise errors, but the loss value is 0, suggesting that it is not actually taking any RNN weights into account when computing the L2 loss.
Then I tried with tensorflow==1.12.0.
This is the output for the first script with dynamic_rnn inside the scope:
[[[ 0.          0.          0.          0.        ]
  [-0.17653276  0.06490126  0.02065791 -0.05175343]
  [-0.413078    0.14486027  0.03922977 -0.1465032 ]
  [ 0.          0.          0.          0.        ]]

 [[-0.5176822   0.03947531  0.00206934 -0.5542746 ]
  [ 0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.        ]]]
0.010403235
And this is the output with dynamic_rnn outside the scope:
[[[ 0.          0.          0.          0.        ]
  [ 0.04208181  0.03031874 -0.1749279   0.04617848]
  [ 0.12169671  0.09322995 -0.29029205  0.08247502]
  [ 0.          0.          0.          0.        ]]

 [[ 0.09673716  0.13300316 -0.02427006  0.00156245]
  [ 0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.        ]
  [ 0.          0.          0.          0.        ]]]
0.0
The fact that the version with dynamic_rnn inside the scope returns a non-zero value suggests that it is working correctly, while in the other case, the returned value 0 suggests that it is not behaving as expected.
So the bottom line is: this was a bug in tensorflow that they solved between version 1.9.0 and version 1.12.0.
Suppose I have a tensor in TensorFlow whose values are:
A = [[0.7, 0.2, 0.1],[0.1, 0.4, 0.5]]
How can I change this tensor into the following:
B = [[1, 0, 0],[0, 0, 1]]
In other words, I want to keep only the maximum of each row, replace it with 1, and set everything else to 0.
Any help would be appreciated.
I think that you can solve it with a one-liner:
import tensorflow as tf
import numpy as np
x_data = [[0.7, 0.2, 0.1],[0.1, 0.4, 0.5]]
# I am using hard-coded dimensions for simplicity
x = tf.placeholder(dtype=tf.float32, name="x", shape=(2,3))
session = tf.InteractiveSession()
session.run(tf.one_hot(tf.argmax(x, 1), 3), {x: x_data})
The result is the one that you expect:
Out[6]:
array([[ 1.,  0.,  0.],
       [ 0.,  0.,  1.]], dtype=float32)
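Two small remarks (my additions): tf.one_hot returns floats by default, and tf.argmax keeps only the first maximum in case of ties. If an integer mask like B is needed, the dtype can be set explicitly:
session.run(tf.one_hot(tf.argmax(x, 1), 3, dtype=tf.int32), {x: x_data})
# array([[1, 0, 0],
#        [0, 0, 1]], dtype=int32)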
I'm having trouble understanding a basic concept in TensorFlow: how does indexing work for tensor read/write operations? To make this specific, how can the following NumPy examples be translated to TensorFlow (using tensors for the arrays, indices, and values being assigned)?
x = np.zeros((3, 4))
row_indices = np.array([1, 1, 2])
col_indices = np.array([0, 2, 3])
x[row_indices, col_indices] = 2
x
with output:
array([[ 0.,  0.,  0.,  0.],
       [ 2.,  0.,  2.,  0.],
       [ 0.,  0.,  0.,  2.]])
... and ...
x[row_indices, col_indices] = np.array([5, 4, 3])
x
with output:
array([[ 0.,  0.,  0.,  0.],
       [ 5.,  0.,  4.,  0.],
       [ 0.,  0.,  0.,  3.]])
... and finally ...
y = x[row_indices, col_indices]
y
with output:
array([ 5., 4., 3.])
There's GitHub issue #206 to support this nicely; meanwhile you have to resort to verbose workarounds.
The first example can be done with tf.select, which combines two same-shaped tensors by selecting each element from one or the other:
tf.reset_default_graph()
row_indices = tf.constant([1, 1, 2])
col_indices = tf.constant([0, 2, 3])
x = tf.zeros((3, 4))
sess = tf.InteractiveSession()
# get list of ((row1, col1), (row2, col2), ..)
coords = tf.transpose(tf.pack([row_indices, col_indices]))
# get tensor with 1's at positions (row1, col1),...
binary_mask = tf.sparse_to_dense(coords, x.get_shape(), 1)
# convert 1/0 to True/False
binary_mask = tf.cast(binary_mask, tf.bool)
twos = 2*tf.ones(x.get_shape())
# make new x out of old values or 2, depending on mask
x = tf.select(binary_mask, twos, x)
print x.eval()
gives
[[ 0.  0.  0.  0.]
 [ 2.  0.  2.  0.]
 [ 0.  0.  0.  2.]]
The second one could be done with scatter_update, except scatter_update only supports linear indices and works on variables. So you could create a temporary variable and use reshaping like this (to avoid variables you could use dynamic_stitch, see the end):
# get linear indices
linear_indices = row_indices*x.get_shape()[1]+col_indices
# turn 'x' into 1d variable since "scatter_update" supports linear indexing only
x_flat = tf.Variable(tf.reshape(x, [-1]))
# no automatic promotion, so make updates float32 to match x
updates = tf.constant([5, 4, 3], dtype=tf.float32)
sess.run(tf.initialize_all_variables())
sess.run(tf.scatter_update(x_flat, linear_indices, updates))
# convert back into original shape
x = tf.reshape(x_flat, x.get_shape())
print x.eval()
gives
[[ 0.  0.  0.  0.]
 [ 5.  0.  4.  0.]
 [ 0.  0.  0.  3.]]
Finally, the third example is already supported with gather_nd; you write
print tf.gather_nd(x, coords).eval()
To get
[ 5. 4. 3.]
Edit, May 6
The update x[row_indices, col_indices] = newvals can be done without using Variables (which occupy memory between session.run calls) by using select with sparse_to_dense, which takes a vector of sparse values, or by relying on dynamic_stitch:
sess = tf.InteractiveSession()
x = tf.zeros((3, 4))
row_indices = tf.constant([1, 1, 2])
col_indices = tf.constant([0, 2, 3])
# no automatic promotion, so specify float type
replacement_vals = tf.constant([5, 4, 3], dtype=tf.float32)
# convert to linear indexing in row-major form
linear_indices = row_indices*x.get_shape()[1]+col_indices
x_flat = tf.reshape(x, [-1])
# use dynamic_stitch; it merges the arrays by taking values either
# from array1[index1] or array2[index2]; if indices conflict,
# the latter one is used
unchanged_indices = tf.range(tf.size(x_flat))
changed_indices = linear_indices
x_flat = tf.dynamic_stitch([unchanged_indices, changed_indices],
                           [x_flat, replacement_vals])
x = tf.reshape(x_flat, x.get_shape())
print x.eval()
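For completeness, newer TensorFlow releases (an assumption relative to the era of this answer) expose ops that cover all three patterns directly, without Variables or sparse_to_dense; a short sketch:
x = tf.zeros((3, 4))
coords = tf.constant([[1, 0], [1, 2], [2, 3]])
# first example: write the constant 2 at the given coordinates
x_twos = tf.tensor_scatter_nd_update(x, coords, tf.fill([3], 2.0))
# second example: write per-coordinate values
x_vals = tf.tensor_scatter_nd_update(x, coords, tf.constant([5., 4., 3.]))
# third example: read the same coordinates back
y = tf.gather_nd(x_vals, coords)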