I'm trying to use a Targmax tensor to index into another tensor.
In numpy you can do the following indexing:
import numpy as np

mat = np.random.uniform(size=3 * 10 * 10).reshape((3, 10, 10))
indices = [np.array([0, 0, 1, 2]), np.array([1, 1, 2, 3]), np.array([1, 3, 0, 3])]
mat[indices]
Is there an equivalent operation in tensorflow?
import tensorflow as tf

x = tf.constant([[1, 2], [3, 4]])
sess = tf.Session()
sess.run(tf.gather_nd(x, [[0, 0], [1, 1]]))
Output:
array([1, 4], dtype=int32)
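Applied to the numpy example from the question, the per-axis index arrays can be stacked into a single (N, rank) coordinate tensor for tf.gather_nd. A minimal sketch (the names nd_indices and gathered are just for illustration):

import tensorflow as tf
import numpy as np

mat = np.random.uniform(size=3 * 10 * 10).reshape((3, 10, 10))
indices = [np.array([0, 0, 1, 2]), np.array([1, 1, 2, 3]), np.array([1, 3, 0, 3])]

# Stack the three per-axis index vectors into a (4, 3) tensor of coordinates.
nd_indices = tf.constant(np.stack(indices, axis=1))
gathered = tf.gather_nd(tf.constant(mat), nd_indices)

sess = tf.Session()
print(sess.run(gathered))  # same four values as the numpy advanced indexing above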
What is the difference between SparseTensor and SparseTensorValue? Is there anything I should keep in mind if I want to build the sparse tensor based on fed indices and values? I could only find a few toy examples.
It depends on where you define your sparse tensor.
If you would like to define the tensor outside the graph, e.g. build the sparse tensor to feed in later as data, use SparseTensorValue. In contrast, if the sparse tensor is defined inside the graph, use SparseTensor.
Sample code for tf.SparseTensorValue:
import tensorflow as tf

# The sparse input is defined outside the graph and fed in at run time.
x_sp = tf.sparse_placeholder(dtype=tf.float32)
W = tf.Variable(tf.random_normal([6, 6]))
y = tf.sparse_tensor_dense_matmul(sp_a=x_sp, b=W)

init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)

# Build the sparse value outside the graph and feed it to the placeholder.
stv = tf.SparseTensorValue(indices=[[0, 0], [1, 2]], values=[1.1, 1.2],
                           dense_shape=[2, 6])
result = sess.run(y, feed_dict={x_sp: stv})
print(result)
Sample code for tf.SparseTensor:
# The sparse tensor is defined inside the graph; its pieces are fed separately.
indices_i = tf.placeholder(dtype=tf.int64, shape=[2, 2])
values_i = tf.placeholder(dtype=tf.float32, shape=[2])
dense_shape_i = tf.placeholder(dtype=tf.int64, shape=[2])
st = tf.SparseTensor(indices=indices_i, values=values_i, dense_shape=dense_shape_i)

W = tf.Variable(tf.random_normal([6, 6]))
y = tf.sparse_tensor_dense_matmul(sp_a=st, b=W)

init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)

result = sess.run(y, feed_dict={indices_i: [[0, 0], [1, 2]],
                                values_i: [1.1, 1.2],
                                dense_shape_i: [2, 6]})
print(result)
Hope this helps!
Now I have a tensor with shape (3*2, 2) that looks like:
[[ 1  2]
 [ 3  4]
 [ 5  6]
 [ 7  8]
 [ 9 10]
 [11 12]]
and I want to reshape it to shape (3, 2*2) along a specific axis, like the following:
[[ 1  2  7  8]
 [ 3  4  9 10]
 [ 5  6 11 12]]
What should I do? The default tf.reshape() will reshape it to:
[[ 1  2  3  4]
 [ 5  6  7  8]
 [ 9 10 11 12]]
SOLUTION: I found that slicing in TensorFlow together with tf.concat() can solve the problem. You can slice sub-tensors and concat them, which solves my problem exactly.
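A minimal sketch of that slice-and-concat idea (assuming the 6x2 example above, split into two row halves; the names top, bottom and result are just for illustration):

import tensorflow as tf

x = tf.constant([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])

top = x[:3, :]                             # rows 0-2, shape (3, 2)
bottom = x[3:, :]                          # rows 3-5, shape (3, 2)
result = tf.concat([top, bottom], axis=1)  # shape (3, 4)

with tf.Session() as sess:
    print(sess.run(result))  # [[1 2 7 8] [3 4 9 10] [5 6 11 12]]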
I tried the following code and got the result that you need, but I am not sure whether the number of steps can be reduced.
import tensorflow as tf

x = [[1, 2],
     [3, 4],
     [5, 6],
     [7, 8],
     [9, 10],
     [11, 12]]

X = tf.placeholder(tf.float32, shape=[6, 2], name='input')
a = tf.reshape(X, [-1, 6])   # shape (2, 6)
b = tf.split(a, 3, 1)        # three tensors of shape (2, 2)
c = tf.reshape(b, [-1, 4])   # stacked back into shape (3, 4)

with tf.Session() as sess:
    c = sess.run(c, feed_dict={X: x})
    print(c)
Hope this helps.
I am trying to pass a one-dimensional sparse vector to TensorFlow:
import tensorflow as tf
import numpy as np

x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)

with tf.Session() as sess:
    indices = np.array([0, 1], dtype=np.int64)
    values = np.array([1.5, 3.0], dtype=np.float32)
    shape = np.array([2], dtype=np.int64)
    print(sess.run(y, feed_dict={
        x: tf.SparseTensorValue(indices, values, shape)}))
This code throws the following error:
ValueError: Cannot feed value of shape (2,) for Tensor u'Placeholder_2:0', which has shape '(?, ?)'
Am I passing the shape wrong?
The indices should have shape (2, 1), so change them to indices = np.array([[0], [1]], dtype=np.int64). The code below works:
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)

with tf.Session() as sess:
    indices = np.array([[0], [1]], dtype=np.int64)
    values = np.array([1.5, 3.0], dtype=np.float32)
    shape = np.array([2], dtype=np.int64)
    print(sess.run(y, feed_dict={
        x: tf.SparseTensorValue(indices, values, shape)}))
    # Output:
    # 4.5
What I want to do:
M = tf.concat([tensor] * N, axis=0)
But N is a tensor whose value is only decided at run time:
other_tensor = tf.placeholder(dtype=tf.int32, shape=[None, 2])
N = tf.shape(other_tensor)[0]  # N is None here; it is decided at run time.
So, how can I do this?
You should use tf.tile, not concat. To get the shape, use tensor.get_shape. Here is an example:
import tensorflow as tf

a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([1, 2])  # not used below
c = tf.tile(a, (1, int(a.get_shape()[0])))  # repeat the columns of a twice

with tf.Session() as sess:
    print(sess.run(c))
If you need your tensor to have a slightly different shape, read about the second parameter of the tile function, and also use tf.reshape.
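Since N in the question is only known at run time, note that the multiples argument of tf.tile can itself be a tensor. A minimal sketch reusing the placeholders from the question (the name M and the dummy feed values are just for illustration):

import tensorflow as tf

tensor = tf.constant([[1, 2], [3, 4]])                          # shape (2, 2)
other_tensor = tf.placeholder(dtype=tf.int32, shape=[None, 2])
N = tf.shape(other_tensor)[0]                                   # dynamic row count

# Repeat `tensor` N times along axis 0, like tf.concat([tensor] * N, axis=0).
M = tf.tile(tensor, tf.stack([N, 1]))

with tf.Session() as sess:
    print(sess.run(M, feed_dict={other_tensor: [[0, 0], [0, 0], [0, 0]]}))
    # shape (6, 2): the rows of `tensor` repeated 3 times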
As generalized slicing is being worked on in this issue, what would be the best way to achieve an op gathering columns of a 2D tensor (matrix)? For example, for tensor t:
1 2 3 4
5 6 7 8
and indices [1,3], I would like to get:
2 4
6 8
which is equivalent to numpy t[:, [1,3]].
Meanwhile, the gather method has an axis parameter.
import tensorflow as tf
params = tf.constant([[1,2,3],[4,5,6]])
indices = [0,2]
op = tf.gather(params, indices, axis=1)
produces the output
[[1 3]
[4 6]]
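In TF 1.x graph mode you would still evaluate op in a session to see that result, e.g.:

with tf.Session() as sess:
    print(sess.run(op))  # [[1 3]
                         #  [4 6]]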
There is a function named tf.nn.embedding_lookup(params, ind) which retrieves the rows of the params tensor.
To achieve what you want, we can first transpose the tensor t from which you want to select certain columns. Then we look up the rows of tf.transpose(t) (i.e. the columns of t), and after the selection we transpose the result back.
import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
ind = tf.constant([0, 2])

result = tf.transpose(tf.nn.embedding_lookup(tf.transpose(t), ind))

with tf.Session() as sess:
    print(sess.run(result))
So far, I created a workaround by flattening the input and using gather:
def gather_cols(params, indices, name=None):
    """Gather columns of a 2D tensor.

    Args:
        params: A 2D tensor.
        indices: A 1D tensor. Must be one of the following types: ``int32``, ``int64``.
        name: A name for the operation (optional).

    Returns:
        A 2D Tensor. Has the same type as ``params``.
    """
    with tf.op_scope([params, indices], name, "gather_cols") as scope:
        # Check input
        params = tf.convert_to_tensor(params, name="params")
        indices = tf.convert_to_tensor(indices, name="indices")
        try:
            params.get_shape().assert_has_rank(2)
        except ValueError:
            raise ValueError('\'params\' must be 2D.')
        try:
            indices.get_shape().assert_has_rank(1)
        except ValueError:
            raise ValueError('\'indices\' must be 1D.')

        # Define op
        p_shape = tf.shape(params)
        p_flat = tf.reshape(params, [-1])
        i_flat = tf.reshape(tf.reshape(tf.range(0, p_shape[0]) * p_shape[1],
                                       [-1, 1]) + indices, [-1])
        return tf.reshape(tf.gather(p_flat, i_flat),
                          [p_shape[0], -1])
Which for:
params = tf.constant([[1, 2, 3],
                      [4, 5, 6]])
indices = [0, 2]
op = gather_cols(params, indices)
produces the expected output:
[[1 3]
[4 6]]