tf.math.bincount - use min/max weight instead of weight sum - tensorflow

I would like to get the max/min value in tf.math.bincount instead of the weight sum. Currently it works like this:
values = tf.constant([1,1,2,3,2,4,4,5])
weights = tf.constant([1,5,0,1,0,5,4,5])
tf.math.bincount(values, weights=weights) #[0 6 0 1 9 5]
However, I would like to get max/min for the conflicting weights instead, e.g. for max it should return:
[0 5 0 1 5 5]

It requires some finessing, but you can accomplish this as follows:
def bincount_with_max_weight(values: tf.Tensor, weights: tf.Tensor) -> tf.Tensor:
    _range = tf.range(tf.reduce_max(values) + 1)
    return tf.map_fn(lambda x: tf.maximum(
        tf.reduce_max(tf.gather(weights, tf.where(tf.equal(values, x)))), 0), _range)
The output for the example case is:
[0 5 0 1 5 5]
Breaking it down, the first line computes the range of values in values:
_range = tf.range(tf.reduce_max(values) + 1)
and in the second line, the maximum weight is computed per element of _range using tf.map_fn. For each x, tf.where retrieves the indices where values equals x, and tf.gather retrieves the weights at those indices.
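For concreteness, here is a small illustration (reusing the example tensors from the question) of what tf.where and tf.gather return for a single element of _range, e.g. x = 4:
values = tf.constant([1, 1, 2, 3, 2, 4, 4, 5])
weights = tf.constant([1, 5, 0, 1, 0, 5, 4, 5])
idx = tf.where(tf.equal(values, 4))            # indices where values == 4 -> [[5], [6]]
print(tf.gather(weights, idx))                 # the weights at those indices -> [[5], [4]]
print(tf.reduce_max(tf.gather(weights, idx)))  # 5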
The tf.maximum wraps the output to handle the case where an element does not occur in values. In the example, 0 does not occur in values, so without tf.maximum the output for 0 would be INT_MIN:
[-2147483648 5 0 1 5 5]
This could also be applied on the final result tensor instead of per element:
def bincount_with_max_weight(values: tf.Tensor, weights: tf.Tensor) -> tf.Tensor:
    _range = tf.range(tf.reduce_max(values) + 1)
    result = tf.map_fn(lambda x:
        tf.reduce_max(tf.gather(weights, tf.where(tf.equal(values, x)))), _range)
    return tf.maximum(result, 0)
Note that this would not work if negative weights are used. In that case, instead of applying tf.maximum, use tf.where to compare against the minimum value of the dtype (tf.int32.min in the example, though the same approach applies to any numeric dtype):
def bincount_with_max_weight(values: tf.Tensor, weights: tf.Tensor) -> tf.Tensor:
    _range = tf.range(tf.reduce_max(values) + 1)
    result = tf.map_fn(lambda x:
        tf.reduce_max(tf.gather(weights, tf.where(tf.equal(values, x)))), _range)
    return tf.where(tf.equal(result, tf.int32.min), 0, result)
Update
For handling the 2D Tensor case, we can use tf.map_fn to apply the maximum weight function to each pair of values and weights in the batch:
from typing import Optional

def bincount_with_max_weight(values: tf.Tensor, weights: tf.Tensor,
                             axis: Optional[int] = None) -> tf.Tensor:
    _range = tf.range(tf.reduce_max(values) + 1)
    def mapping_function(x: int, _values: tf.Tensor, _weights: tf.Tensor) -> tf.Tensor:
        return tf.reduce_max(tf.gather(_weights, tf.where(tf.equal(_values, x))))
    if axis == -1:
        result = tf.map_fn(lambda pair: tf.map_fn(lambda x: mapping_function(x, *pair), _range),
                           (values, weights), dtype=tf.int32)
    else:
        result = tf.map_fn(lambda x: mapping_function(x, values, weights), _range)
    return tf.where(tf.equal(result, tf.int32.min), 0, result)
For the 2D example provided:
values = tf.constant([[1, 1, 2, 3], [2, 1, 4, 5]])
weights = tf.constant([[1, 5, 0, 1], [0, 5, 4, 5]])
print(bincount_with_max_weight(values, weights, axis=-1))
The output is:
tf.Tensor(
[[0 5 0 1 0 0]
[0 5 0 0 4 5]], shape=(2, 6), dtype=int32)
This implementation is a generalization of the approach originally described: if axis is omitted, it computes the result for the 1D case, as shown below.
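For example, calling the generalized function on the tensors from the question without axis should reproduce the 1D result from the first part of the answer:
values = tf.constant([1, 1, 2, 3, 2, 4, 4, 5])
weights = tf.constant([1, 5, 0, 1, 0, 5, 4, 5])
print(bincount_with_max_weight(values, weights))  # expected: [0 5 0 1 5 5]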

For faster execution, try this:
import numpy as np
import tensorflow as tf

values = tf.constant([[1, 1, 2, 3], [2, 1, 4, 5]])
weights = tf.constant([[1, 5, 0, 1], [0, 5, 4, 5]])

def find_max_bins(output, values, weights):
    # Element-wise, unbuffered maximum: output[v] = max(output[v], w) for each (v, w) pair.
    np.maximum.at(output, values, weights)
    return output

@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32),
                              tf.TensorSpec(shape=[None], dtype=tf.int32),
                              tf.TensorSpec(shape=[None], dtype=tf.int32)])
def tf_function(output, values, weights):
    print(values)
    y = tf.numpy_function(find_max_bins, [output, values, weights], tf.float32)
    return y

length = np.max(values) + 1
initial_value = [0 for x in range(length)]
variable = tf.Variable(initial_value=initial_value, shape=(length,), dtype=tf.float32)
for i, (value, weight) in enumerate(zip(values, weights)):
    if i > 0:
        output = tf.stack([output, tf_function(variable, value, weight)], 0)
    else:
        output = tf_function(variable, value, weight)
    variable.assign_sub(initial_value)
Output:
<tf.Tensor: shape=(2, 6), dtype=float32, numpy=
array([[0., 5., 0., 1., 0., 0.],
[0., 5., 0., 0., 4., 5.]], dtype=float32)>

Related

Autodiff implementation for gradient calculation

I have worked through some papers about the autodiff algorithm in order to implement it myself (for learning purposes). I compared my algorithm against TensorFlow in test cases, and their outputs did not match in most cases. Therefore I worked through the tutorial from this site and implemented it with TensorFlow operations, just for the matrix multiplication operation, since that was one of the operations that did not work:
gradient of matmul and unbroadcast method:
def gradient_matmul(node, dx, adj):
    # dx is needed to know which of the two parents should be derived
    a = node.parents[0]
    b = node.parents[1]
    # the operation was node.tensor = tf.matmul(a.tensor, b.tensor)
    if a == dx or b == dx:
        # the result depends on which of the parents is the derivative
        mm = tf.matmul(adj, tf.transpose(b.tensor)) if a == dx else \
            tf.matmul(tf.transpose(a.tensor), adj)
        return mm
    else:
        return None
def unbroadcast(adjoint, node):
    dim_a = len(adjoint.shape)
    dim_b = len(node.shape)
    if dim_a > dim_b:
        sum = tuple(range(dim_a - dim_b))
        res = tf.math.reduce_sum(adjoint, axis=sum)
        return res
    return adjoint
And finally the gradient calculation autodiff algorithm:
from collections import defaultdict

def gradient(y, dx):
    working = [y]
    adjoints = defaultdict(float)
    adjoints[y] = tf.ones(y.tensor.shape)
    while len(working) != 0:
        curr = working.pop(0)
        if curr == dx:
            return adjoints[curr]
        if curr.is_store:
            continue
        adj = adjoints[curr]
        for p in curr.parents:
            # for testing with matrix multiplication as the only operation
            local_grad = gradient_matmul(curr, p, adj)
            adjoints[p] = unbroadcast(tf.add(adjoints[p], local_grad), p.tensor)
            if p not in working:
                working.append(p)
Yet it produces the same output as my initial implementation.
I constructed a matrix multiplication test case:
x = tf.constant([[[1.0, 1.0], [2.0, 3.0]], [[4.0, 5.0], [6.0, 7.0]]])
y = tf.constant([[3.0, -7.0], [-1.0, 5.0]])
z = tf.constant([[[1, 1], [2.0, 2]], [[3, 3], [-1, -1]]])
w = tf.matmul(tf.matmul(x, y), z)
where w should be differentiated with respect to each of the variables.
TensorFlow calculates the gradients:
[<tf.Tensor: shape=(2, 2, 2), dtype=float32, numpy=
 array([[[-22.,  18.],
         [-22.,  18.]],
        [[ 32., -16.],
         [ 32., -16.]]], dtype=float32)>,
 <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
 array([[66., -8.],
        [80., -8.]], dtype=float32)>,
 <tf.Tensor: shape=(2, 2, 2), dtype=float32, numpy=
 array([[[  5.,   5.],
         [ -1.,  -1.]],
        [[ 18.,  18.],
         [-10., -10.]]], dtype=float32)>]
My implementation calculates:
[[[-5. 7.]
[-5. 7.]]
[[-5. 7.]
[-5. 7.]]]
[[33. 22.]
[54. 36.]]
[[[ 9. 9.]
[14. 14.]]
[[-5. -5.]
[-6. -6.]]]
Maybe the problem is the difference between numpy's dot and TensorFlow's matmul?
But then I don't know how to fix the gradient or unbroadcast for the TensorFlow method...
Thanks for taking the time to look over my code! :)
I found the error; the matmul gradient should have been:
def gradient_matmul(node, dx, adj):
    a = node.parents[0]
    b = node.parents[1]
    if a == dx:
        return tf.matmul(adj, b.tensor, transpose_b=True)
    elif b == dx:
        return tf.matmul(a.tensor, adj, transpose_a=True)
    else:
        return None
since I only want to transpose the last two dimensions.
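A quick way to cross-check the corrected rule is to compare against tf.GradientTape on the same test case; a minimal sketch, assuming TF 2.x eager execution:
import tensorflow as tf

x = tf.constant([[[1.0, 1.0], [2.0, 3.0]], [[4.0, 5.0], [6.0, 7.0]]])
y = tf.constant([[3.0, -7.0], [-1.0, 5.0]])
z = tf.constant([[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [-1.0, -1.0]]])

with tf.GradientTape() as tape:
    tape.watch([x, y, z])  # constants are not watched automatically
    w = tf.matmul(tf.matmul(x, y), z)

# Gradients of the (implicitly summed) output with respect to each input,
# i.e. the same quantities listed in the question.
dx, dy, dz = tape.gradient(w, [x, y, z])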

Weird shape requirement for `sample_weight` argument in Keras losses in TF2.0

According to the TF documentation, the sample_weight argument can have shape [batch_size]. The relevant documentation is quoted below:
sample_weight: Optional Tensor whose rank is either 0, or the same rank as y_true, or is broadcastable to y_true. sample_weight acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If sample_weight is a tensor of size [batch_size], then the total loss for each sample of the batch is rescaled by the corresponding element in the sample_weight vector. If the shape of sample_weight matches the shape of y_pred, then the loss of each measurable element of y_pred is scaled by the corresponding value of sample_weight.
However, I cannot understand why the following code does not work.
import tensorflow as tf
gt = tf.convert_to_tensor([1, 1, 1, 1, 1])
pred = tf.convert_to_tensor([1., 0., 1., 1., 0.])
sample_weights = tf.convert_to_tensor([0, 1, 0, 0, 0])
loss = tf.keras.losses.BinaryCrossentropy()(gt, pred, sample_weight=sample_weights)
print(loss)
The code throws this error:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Can not squeeze dim[0], expected a dimension of 1, got 5 [Op:Squeeze]
If I expand the dimensions of gt, pred, and sample_weights, then it works properly and outputs the expected loss value of 3.0849898.
import tensorflow as tf
gt = tf.convert_to_tensor([1, 1, 1, 1, 1])
pred = tf.convert_to_tensor([1., 0., 1., 1., 0.])
sample_weights = tf.convert_to_tensor([0, 1, 0, 0, 0])
# expand dims
gt = tf.expand_dims(gt, 1)
pred = tf.expand_dims(pred, 1)
sample_weights = tf.expand_dims(sample_weights, 1)
loss = tf.keras.losses.BinaryCrossentropy()(gt, pred, sample_weight=sample_weights)
print(loss) # loss is 3.0849898
The problem is not the shape of sample_weight. It's the shape of pred and gt, which should be [batch_size, n_labels]:
import tensorflow as tf
gt = tf.convert_to_tensor([1, 1, 1, 1, 1])
pred = tf.convert_to_tensor([1., 0., 1., 1., 0.])
sample_weights = tf.convert_to_tensor([0, 1, 0, 0, 0])
# expand dims
gt = tf.expand_dims(gt, 1)
pred = tf.expand_dims(pred, 1)
print(gt.shape, pred.shape) #(5, 1) (5, 1)
loss = tf.keras.losses.BinaryCrossentropy()(gt, pred, sample_weight=sample_weights)
print(loss) # loss is 3.0849898

Discrepancy between tensorflow's conv1d and pytorch's conv1d

I am trying to port some PyTorch code to TensorFlow. I came to know that torch.nn.functional.conv1d() corresponds to tf.nn.conv1d(), but I am afraid there are still some discrepancies between the two versions. Specifically, I cannot find the groups parameter in tf.nn.conv1d. For example, the following two code snippets output different results:
Pytorch:
import torch
import torch.nn.functional as F

inputs = torch.Tensor([[[1, 1, 1, 1],[2, 2, 2, 2],[3, 3, 3, 3]]]) # batch_size x seq_length x embed_dim
inputs = inputs.transpose(2,1) #batch_size x embed_dim x seq_length
batch_size, embed_dim, seq_length = inputs.size()
kernel_size = 3
in_channels = 2
out_channels = in_channels
weight = torch.ones(out_channels, 1, kernel_size)
inputs = inputs.contiguous().view(-1, in_channels, seq_length) #batch_size*embed_dim/in_channels x in_channels x seq_length
inputs = F.pad(inputs, (kernel_size-1,0), 'constant', 0)
output = F.conv1d(inputs, weight, padding=0, groups=in_channels)
output = output.contiguous().view(batch_size, embed_dim, seq_length).transpose(2,1)
Output:
tensor([[[1., 1., 1., 1.],
[3., 3., 3., 3.],
[6., 6., 6., 6.]]])
Tensorflow:
inputs = tf.constant([[[1, 1, 1, 1],[2, 2, 2, 2],[3, 3, 3, 3]]], dtype=tf.float32) # batch_size x seq_length x embed_dim
inputs = tf.transpose(inputs, perm=[0,2,1])
batch_size, embed_dim, seq_length = inputs.get_shape()
print(batch_size, seq_length, embed_dim)
kernel_size = 3
in_channels = 2
out_channels = in_channels
weight = tf.ones([kernel_size, in_channels, out_channels])
inputs = tf.reshape(inputs, [(batch_size*embed_dim)//in_channels, in_channels, seq_length], name='inputs')
inputs = tf.transpose(inputs, perm=[0, 2, 1])
padding = [[0, 0], [(kernel_size - 1), 0], [0, 0]]
padded = tf.pad(inputs, padding)
res = tf.nn.conv1d(padded, weight, 1, 'VALID')
res = tf.transpose(res, perm=[0, 2, 1])
res = tf.reshape(res, [batch_size, embed_dim, seq_length])
res = tf.transpose(res, perm=[0, 2, 1])
print(res)
Output:
tf.Tensor(
[[[ 2.  2.  2.  2.]
  [ 6.  6.  6.  6.]
  [12. 12. 12. 12.]]], shape=(1, 3, 4), dtype=float32)
Different results
There is no discrepancy between those versions; you are just setting up different things. To get exactly the same results as in TensorFlow, change the line specifying the weights to:
weight = torch.ones(out_channels, 2, kernel_size)
because your input has two input channels, as you have correctly declared in TF:
weight = tf.ones([kernel_size, in_channels, out_channels])
Groups parameter
You have misunderstood what the groups parameter is responsible for in PyTorch. It restricts the number of input channels each filter uses (in this case only one, since 2 input channels divided into 2 groups gives one channel per filter).
See here for a more intuitive explanation for the 2D convolution case.
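If you do need the grouped behaviour on the TensorFlow side, newer releases expose a groups argument on tf.keras.layers.Conv1D; with plain tf.nn.conv1d, one common workaround is to split the channel axis, convolve each group with its own slice of the filters, and concatenate. A minimal sketch (grouped_conv1d is a hypothetical helper, not part of the original answer):
def grouped_conv1d(x, filters, groups):
    # x: [batch, width, in_channels]
    # filters: [kernel_size, in_channels // groups, out_channels]
    x_groups = tf.split(x, groups, axis=-1)        # split the input channels
    f_groups = tf.split(filters, groups, axis=-1)  # split the output channels
    outputs = [tf.nn.conv1d(xg, fg, 1, 'VALID')
               for xg, fg in zip(x_groups, f_groups)]
    return tf.concat(outputs, axis=-1)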

How to implement the time sequence described in the following

The ST (spatial transform) has two inputs. The first one is Fi, which is fixed. The other is M, which varies according to the output of the last LSTM. The input of the LSTM depends on the output of the ST and the state of the last LSTM.
The easiest way to do this is probably to write your own RNN cell. Another way is to use tf.raw_rnn. Check out this post or this excellent article.
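A minimal sketch of the first option, in the TF 1.x API used below (STLSTMCell and transform_fn are hypothetical names, not from the original post): the custom cell wraps a standard LSTM cell and applies the spatial-transform step to its input at every time step, so the transform can depend on the previous state.
class STLSTMCell(tf.nn.rnn_cell.RNNCell):
    """Wraps an LSTM cell and transforms its input before every step."""

    def __init__(self, num_units, transform_fn):
        super(STLSTMCell, self).__init__()
        self._lstm = tf.nn.rnn_cell.BasicLSTMCell(num_units)
        self._transform_fn = transform_fn  # e.g. the spatial transform driven by the previous state

    @property
    def state_size(self):
        return self._lstm.state_size

    @property
    def output_size(self):
        return self._lstm.output_size

    def call(self, inputs, state):
        transformed = self._transform_fn(inputs, state)  # ST output fed to the LSTM
        return self._lstm(transformed, state)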
Actually, I implemented the network as follows:
def build_model(self):
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(
        num_units=self.config.num_lstm_units, state_is_tuple=True, reuse=True)
    if self.mode == "train":
        lstm_cell = tf.contrib.rnn.DropoutWrapper(
            lstm_cell,
            input_keep_prob=self.config.lstm_dropout_keep_prob,
            output_keep_prob=self.config.lstm_dropout_keep_prob)
    with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
        zero_state = lstm_cell.zero_state(
            batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
        K = 5
        C = 80
        scores = tf.Variable(tf.random_normal(shape=[K, self.config.batch_size, C]), name="scores")
        M = tf.Variable(tf.random_normal(shape=[K+1, self.config.batch_size, 2, 3]), name="M")
        tf.assign(M[0], tf.convert_to_tensor([[1., 0., 0.], [0., 1., 0.]]))
        lstm_input_size = 14
        zk_size = 4096
        hidden = zero_state
        for k in range(0, K+1):
            # Allow the LSTM variables to be reused.
            if k > 0:
                lstm_scope.reuse_variables()
            f_k = spatial_transformer_network.spatial_transformer_network(self.image_embeddings, M[k])
            f_k = tf.nn.max_pool(f_k, [1, 2, 2, 1], [1, 1, 1, 1], padding='VALID')
            f_k = tf.layers.dense(tf.reshape(f_k, [self.config.batch_size, int(lstm_input_size * lstm_input_size / 4 * 512)]), 4096)
            lstm_outputs, hidden = lstm_cell(f_k, hidden)
            z_k = tf.layers.dense(hidden[0], zk_size, activation=tf.nn.relu)
            if k != 0:
                tf.assign(scores[k - 1], tf.layers.dense(z_k, C))
            if k != K:
                tf.assign(M[k + 1], tf.reshape(tf.layers.dense(z_k, 6), [self.config.batch_size, 2, 3]))
                tf.assign(M[k + 1, :, 0, 1], tf.convert_to_tensor(0.))
                tf.assign(M[k + 1, :, 1, 0], tf.convert_to_tensor(0.))
But it throws an error when it reaches
lstm_outputs, hidden = lstm_cell(f_k, hidden).
The error info is:
ValueError: Variable lstm/basic_lstm_cell/kernel does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=tf.AUTO_REUSE in VarScope?
So what's the problem?
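One thing worth checking, as the error message itself hints: on the first call there are no LSTM variables to reuse yet, so constructing the cell with reuse=True makes tf.get_variable fail. A minimal self-contained TF 1.x sketch of the pattern the message suggests, creating the cell without reuse=True and letting the enclosing scope use tf.AUTO_REUSE:
import tensorflow as tf

cell = tf.contrib.rnn.BasicLSTMCell(num_units=8, state_is_tuple=True)  # no reuse=True here
inputs = tf.zeros([4, 16])
state = cell.zero_state(batch_size=4, dtype=tf.float32)

with tf.variable_scope("lstm", reuse=tf.AUTO_REUSE):
    for k in range(3):
        # The first iteration creates lstm/basic_lstm_cell/kernel; later ones reuse it.
        output, state = cell(inputs, state)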

Is there any workaround to unstack a tensor along a dimension with variable length?

I need to loop over the first dimension, whose length is variable. How can I manage to do that? If it is impossible, are there any workarounds?
tf.unstack along the dynamic dimension is not supported:
If value.shape[axis] is not known, ValueError is raised.
But you can try to use tf.while_loop to iterate over the tensor slices. Here's an example that computes the sum:
import numpy as np
import tensorflow as tf

# Input tensor: trying to iterate along axis=0
x = tf.placeholder(dtype=tf.float32, shape=[None, 3])
batch_size = tf.shape(x)[0]

def cond(x, i, _):
    return i < batch_size

def body(x, i, x_prev):
    # Do some operation with `x_prev` and `x[i]`. Here we just add the slices.
    sum = x_prev + x[i]
    return x, i + 1, sum

# This means: starting from 0, apply the body while `cond` is true
_, _, c = tf.while_loop(cond, body, (x, 0, tf.zeros([3])))

# Test it
with tf.Session() as sess:
    data = np.arange(12).reshape([4, 3])
    print(data)
    result = sess.run(c, feed_dict={x: data})
    print(result)
Output:
[[ 0  1  2]
 [ 3  4  5]
 [ 6  7  8]
 [ 9 10 11]]
[18. 22. 26.]
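As a side note, tf.map_fn can also iterate over a dynamically sized first dimension when one output per slice is enough, rather than a running accumulation; a small sketch using the same data:
import numpy as np
import tensorflow as tf

x = tf.placeholder(dtype=tf.float32, shape=[None, 3])
# One reduced value per row; the leading dimension may be unknown at graph-construction time.
row_sums = tf.map_fn(lambda row: tf.reduce_sum(row), x)

with tf.Session() as sess:
    data = np.arange(12).reshape([4, 3])
    print(sess.run(row_sums, feed_dict={x: data}))  # [ 3. 12. 21. 30.]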