I have been trying for a while to implement sampled softmax because I have half a million output classes.
I have tried to follow the official documentation exactly, but I always get an error. This is my code:
def forward_propagation_sampled(X, parameters):
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']
    Z1 = tf.add(tf.matmul(W1, X), b1)
    A1 = tf.nn.relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)
    A2 = tf.nn.relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)
    return Z3, W3, b3
This is the cost computation function:
def compute_cost(Z3, W3, b3, Y, mode):
    Z3.set_shape([1144, 1])
    if mode == "train":
        loss = tf.nn.sampled_softmax_loss(
            weights=tf.transpose(W3),
            biases=tf.Variable(b3),
            labels=tf.reshape(tf.argmax(Y, 1), [-1, 1]),  # since Y is one-hot encoded
            inputs=tf.Variable(initial_value=Z3, dtype=tf.float32, expected_shape=[1144, 1]),
            num_sampled=2000,
            num_classes=1144,
            partition_strategy="div"
        )
    elif mode == "eval":
        logits = tf.matmul(inputs, tf.transpose(weights))
        logits = tf.nn.bias_add(logits, biases)
        labels_one_hot = tf.one_hot(labels, n_classes)
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels_one_hot, logits=logits)
    cost = tf.reduce_mean(loss)
    return cost
For the purpose of just testing this out, I am using 1144 output classes, which would otherwise scale to 500,000. There are 3144 training examples.
I get this error:
Shape must be rank 1 but is rank 2 for 'sampled_softmax_loss/Slice_1' (op: 'Slice') with input shapes: [3144,1], [1], [1].
I am unable to debug this or make any sense out of it. Any help would be really appreciated.
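For reference, tf.nn.sampled_softmax_loss expects weights of shape [num_classes, dim], biases of shape [num_classes], inputs of shape [batch_size, dim], and int64 labels of shape [batch_size, num_true]. A minimal sketch with hypothetical sizes (not taken from the code above):

import tensorflow as tf

dim, num_classes = 128, 1144  # hypothetical sizes

inputs = tf.placeholder(tf.float32, [None, dim])    # last hidden layer activations, [batch_size, dim]
labels = tf.placeholder(tf.int64, [None, 1])        # true class indices, [batch_size, num_true]
weights = tf.Variable(tf.truncated_normal([num_classes, dim]))  # output weights, [num_classes, dim], NOT transposed
biases = tf.Variable(tf.zeros([num_classes]))

loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
    weights=weights,
    biases=biases,
    labels=labels,
    inputs=inputs,
    num_sampled=200,
    num_classes=num_classes,
    partition_strategy="div"))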
I am trying to estimate the forward pass and the backward gradient of the function below:
def func(img_batch, X1, X2):
    L = 1
    A1 = X1 * L**2
    A2 = X2 * L**2
    AA1 = A1 * A1
    AA2 = A2 * A2
    A1A2 = A1 * A2
    v = tf.nn.conv2d(img_batch, A1A2, strides=[1, 1, 1, 1], padding='SAME')
    v = v + AA1 + AA2
    return v
When I add this function to the network, by default the gradient is computed through every operation inside the function.
How can I use this function in the forward pass while ignoring the gradients of its individual operations, provide my own gradient estimate instead, and add that to the main gradient of the model?
You can use py_func to ignore the gradients in this function, and use gradient_override_map to provide customized gradients. Here is an example:
import tensorflow as tf

def myfunc(X1, X2):
    L = 1
    A1 = X1 * L**2
    A2 = X2 * L**2
    AA1 = A1 * A1
    AA2 = A2 * A2
    A11A2 = A1 * A2
    ...
    v = AA1 + AA2 + A11A2
    return v
#tf.RegisterGradient("GradMyfunc")
def grad_myfunc(op, grad):
    X1 = op.inputs[0]
    X2 = op.inputs[1]
    return [grad * X2, grad * X1]

X1 = tf.Variable(tf.constant(1.1, dtype=tf.float64))
X2 = tf.Variable(tf.constant(2.2, dtype=tf.float64))

g = tf.get_default_graph()
with g.gradient_override_map({"PyFunc": "GradMyfunc"}):
    y = tf.py_func(myfunc, [X1, X2], [tf.float64])

with tf.Session() as sess:
    grad = tf.gradients(y, [X1, X2])
    sess.run(tf.global_variables_initializer())
    print(sess.run(y))
    print(sess.run(grad))
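As a side note, newer TensorFlow versions (roughly 1.7 and later) also provide tf.custom_gradient, which avoids the override map entirely; a minimal sketch of the same idea under that assumption:

import tensorflow as tf

@tf.custom_gradient
def myfunc_cg(X1, X2):
    # forward pass: computed normally, but its internal ops are ignored for backprop
    v = X1 * X1 + X2 * X2 + X1 * X2
    def grad(dy):
        # custom backward pass: return whatever gradient estimate you want, one per input
        return dy * X2, dy * X1
    return v, grad

X1 = tf.constant(1.1, dtype=tf.float64)
X2 = tf.constant(2.2, dtype=tf.float64)
v = myfunc_cg(X1, X2)
with tf.Session() as sess:
    print(sess.run(tf.gradients(v, [X1, X2])))  # [2.2, 1.1]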
Can you give me a code example that uses tf.metrics.sparse_average_precision_at_k? I cannot find anything on the Internet... :(
If I have a multi-labeled dataset like this one (each example may have more than one target label):
(total number of classes = 5)
y1 = [class_0, class_1]
y2 = [class_1, class_2]
y3 = [class_0]
and my system predicts:
prediction for y1 -> [0.1, 0.3, 0.2, 0.0, 0.0]
prediction for y2 -> [0.0, 0.3, 0.7, 0.4, 0.4]
prediction for y3 -> [0.1, 0.3, 0.2, 0.3, 0.5]
How can I compute for k=3, for example?
P.S.: Feel free to suggest your own example, if you can't comprehend this one.
EDIT: My code so far:
I really don't get it... Please advise for a single prediction (y1 only) as well as for several predictions at once (with a different number of true classes in each).
import numpy as np
import tensorflow as tf
sess = tf.InteractiveSession()
tf.local_variables_initializer().run()
y1 = tf.constant( np.array([0, 1]) )
y2 = tf.constant( np.array([1, 2]) )
y3 = tf.constant( np.array([0]) )
p1 = tf.constant( np.array([0.1, 0.3, 0.2, 0.0, 0.0]) )
p2 = tf.constant( np.array([0.0, 0.3, 0.7, 0.4, 0.4]) )
p3 = tf.constant( np.array([0.1, 0.3, 0.2, 0.3, 0.5]) )
metric, update = tf.metrics.sparse_average_precision_at_k(tf.cast(y1, tf.int64), p1, 3)
print(sess.run(update))
tf.metrics.sparse_average_precision_at_k is deprecated and will be replaced by tf.metrics.average_precision_at_k. If you browse the TensorFlow source, you will find that when your inputs are y_true and y_pred, the function actually transforms y_pred into y_pred_idx using the top_k function.
y_true is a tensor of shape (batch_size, num_labels), and y_pred is of shape (batch_size, num_classes).
You can also see some discussion in this issue; the example below comes from that issue.
import tensorflow as tf
import numpy as np
y_true = np.array([[2], [1], [0], [3], [0]]).astype(np.int64)
y_true = tf.identity(y_true)
y_pred = np.array([[0.1, 0.2, 0.6, 0.1],
                   [0.8, 0.05, 0.1, 0.05],
                   [0.3, 0.4, 0.1, 0.2],
                   [0.6, 0.25, 0.1, 0.05],
                   [0.1, 0.2, 0.6, 0.1]]).astype(np.float32)
y_pred = tf.identity(y_pred)
_, m_ap = tf.metrics.sparse_average_precision_at_k(y_true, y_pred, 3)
sess = tf.Session()
sess.run(tf.local_variables_initializer())
stream_vars = [i for i in tf.local_variables()]
tf_map = sess.run(m_ap)
print(tf_map)
print((sess.run(stream_vars)))
tmp_rank = tf.nn.top_k(y_pred,3)
print(sess.run(tmp_rank))
The line stream_vars = [i for i in tf.local_variables()] lets you see the two local variables that tf.metrics.sparse_average_precision_at_k creates.
The line tmp_rank = tf.nn.top_k(y_pred, 3) helps you understand which prediction indices tf.metrics.sparse_average_precision_at_k actually uses.
You can change the value of k to see how the result changes; tmp_rank shows the indices used when calculating the average precision.
For example:
When k=1, only the first example's top prediction matches its label, so the average precision at 1 is 1/5 = 0.2.
When k=2, the third example also matches its label (at rank 2), so the average precision at 2 is (1 + 1/2)/5 = 0.3.
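As a manual sanity check (a sketch assuming one true label per example, with ties broken towards the lower index as tf.nn.top_k does), the same numbers can be reproduced in plain NumPy:

import numpy as np

y_true = np.array([2, 1, 0, 3, 0])
y_pred = np.array([[0.1, 0.2, 0.6, 0.1],
                   [0.8, 0.05, 0.1, 0.05],
                   [0.3, 0.4, 0.1, 0.2],
                   [0.6, 0.25, 0.1, 0.05],
                   [0.1, 0.2, 0.6, 0.1]])

def average_precision_at_k(y_true, y_pred, k):
    # rank classes by score, highest first; stable sort keeps the lower index first on ties
    ranking = np.argsort(-y_pred, axis=1, kind='stable')[:, :k]
    ap = []
    for label, topk in zip(y_true, ranking):
        hits = np.where(topk == label)[0]
        # with a single relevant label, AP@k is 1/rank if the label is in the top k, else 0
        ap.append(1.0 / (hits[0] + 1) if hits.size else 0.0)
    return np.mean(ap)

print(average_precision_at_k(y_true, y_pred, 1))  # 0.2
print(average_precision_at_k(y_true, y_pred, 2))  # 0.3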
metric, update = tf.metrics.sparse_average_precision_at_k(tf.stack([y1, y2, y3]), tf.stack([p1, p2, p3]), 3)
print(sess.run(update))
This neural network trains on inputs [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] with labelled outputs: [[0.0], [1.0], [1.0], [0.0]]
import numpy as np
import tensorflow as tf
sess = tf.InteractiveSession()
sess.run(init)
# a batch of inputs of 2 value each
inputs = tf.placeholder(tf.float32, shape=[None, 2])
# a batch of output of 1 value each
desired_outputs = tf.placeholder(tf.float32, shape=[None, 1])
# [!] define the number of hidden units in the first layer
HIDDEN_UNITS = 4
weights_1 = tf.Variable(tf.truncated_normal([2, HIDDEN_UNITS]))
biases_1 = tf.Variable(tf.zeros([HIDDEN_UNITS]))
# connect 2 inputs to every hidden unit. Add bias
layer_1_outputs = tf.nn.sigmoid(tf.matmul(inputs, weights_1) + biases_1)
print(layer_1_outputs)
NUMBER_OUTPUT_NEURONS = 1
biases_2 = tf.Variable(tf.zeros([NUMBER_OUTPUT_NEURONS]))
weights_2 = tf.Variable(tf.truncated_normal([HIDDEN_UNITS, NUMBER_OUTPUT_NEURONS]))
finalLayerOutputs = tf.nn.sigmoid(tf.matmul(layer_1_outputs, weights_2) + biases_2)
tf.global_variables_initializer().run()
logits = tf.nn.sigmoid(tf.matmul(layer_1_outputs, weights_2) + biases_2)
training_inputs = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
training_outputs = [[0.0], [1.0], [1.0], [0.0]]
error_function = 0.5 * tf.reduce_sum(tf.sub(logits, desired_outputs) * tf.sub(logits, desired_outputs))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(error_function)
for i in range(15):
    _, loss = sess.run([train_step, error_function],
                       feed_dict={inputs: np.array(training_inputs),
                                  desired_outputs: np.array(training_outputs)})
print(sess.run(logits, feed_dict={inputs: np.array([[0.0, 1.0]])}))
After training, this network returns [[ 0.61094815]] for the input [[0.0, 1.0]].
Is [[ 0.61094815]] the value with the highest probability that the trained network assigns to the input [[0.0, 1.0]]? Can the lower-probability values also be accessed, and not just the most probable one?
If I increase the number of training epochs I'll get a better prediction, but in this case I just want to access all potential values with their probabilities for a given input.
Update:
I have updated the code to use multi-class classification with softmax, but the prediction for [[0.0, 1.0, 0.0, 0.0]] is [array([0])]. Have I updated it correctly?
import numpy as np
import tensorflow as tf
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
# a batch of inputs of 4 values each
inputs = tf.placeholder(tf.float32, shape=[None, 4])
# a batch of outputs of 3 values each
desired_outputs = tf.placeholder(tf.float32, shape=[None, 3])
# [!] define the number of hidden units in the first layer
HIDDEN_UNITS = 4
weights_1 = tf.Variable(tf.truncated_normal([4, HIDDEN_UNITS]))
biases_1 = tf.Variable(tf.zeros([HIDDEN_UNITS]))
# connect 4 inputs to every hidden unit. Add bias
layer_1_outputs = tf.nn.softmax(tf.matmul(inputs, weights_1) + biases_1)
biases_2 = tf.Variable(tf.zeros([3]))
weights_2 = tf.Variable(tf.truncated_normal([HIDDEN_UNITS, 3]))
finalLayerOutputs = tf.nn.softmax(tf.matmul(layer_1_outputs, weights_2) + biases_2)
tf.global_variables_initializer().run()
logits = tf.nn.softmax(tf.matmul(layer_1_outputs, weights_2) + biases_2)
training_inputs = [[0.0, 0.0 , 0.0, 0.0], [0.0, 1.0 , 0.0, 0.0], [1.0, 0.0 , 0.0, 0.0], [1.0, 1.0 , 0.0, 0.0]]
training_outputs = [[0.0,0.0,0.0], [1.0,0.0,0.0], [1.0,0.0,0.0], [0.0,0.0,1.0]]
error_function = 0.5 * tf.reduce_sum(tf.sub(logits, desired_outputs) * tf.sub(logits, desired_outputs))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(error_function)
for i in range(15):
    _, loss = sess.run([train_step, error_function],
                       feed_dict={inputs: np.array(training_inputs),
                                  desired_outputs: np.array(training_outputs)})
prediction=tf.argmax(logits,1)
best = sess.run([prediction],feed_dict={inputs: np.array([[0.0, 1.0, 0.0, 0.0]])})
print(best)
Which prints [array([0])]
Update 2:
Replacing
prediction=tf.argmax(logits,1)
best = sess.run([prediction],feed_dict={inputs: np.array([[0.0, 1.0, 0.0, 0.0]])})
print(best)
With:
prediction=tf.nn.softmax(logits)
best = sess.run([prediction],feed_dict={inputs: np.array([[0.0, 1.0, 0.0, 0.0]])})
print(best)
This appears to fix the issue.
So now the full source is:
import numpy as np
import tensorflow as tf
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
# a batch of inputs of 4 values each
inputs = tf.placeholder(tf.float32, shape=[None, 4])
# a batch of outputs of 3 values each
desired_outputs = tf.placeholder(tf.float32, shape=[None, 3])
# [!] define the number of hidden units in the first layer
HIDDEN_UNITS = 4
weights_1 = tf.Variable(tf.truncated_normal([4, HIDDEN_UNITS]))
biases_1 = tf.Variable(tf.zeros([HIDDEN_UNITS]))
# connect 4 inputs to every hidden unit. Add bias
layer_1_outputs = tf.nn.softmax(tf.matmul(inputs, weights_1) + biases_1)
biases_2 = tf.Variable(tf.zeros([3]))
weights_2 = tf.Variable(tf.truncated_normal([HIDDEN_UNITS, 3]))
finalLayerOutputs = tf.nn.softmax(tf.matmul(layer_1_outputs, weights_2) + biases_2)
tf.global_variables_initializer().run()
logits = tf.nn.softmax(tf.matmul(layer_1_outputs, weights_2) + biases_2)
training_inputs = [[0.0, 0.0 , 0.0, 0.0], [0.0, 1.0 , 0.0, 0.0], [1.0, 0.0 , 0.0, 0.0], [1.0, 1.0 , 0.0, 0.0]]
training_outputs = [[0.0,0.0,0.0], [1.0,0.0,0.0], [1.0,0.0,0.0], [0.0,0.0,1.0]]
error_function = 0.5 * tf.reduce_sum(tf.sub(logits, desired_outputs) * tf.sub(logits, desired_outputs))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(error_function)
for i in range(1500):
    _, loss = sess.run([train_step, error_function],
                       feed_dict={inputs: np.array(training_inputs),
                                  desired_outputs: np.array(training_outputs)})
prediction=tf.nn.softmax(logits)
best = sess.run([prediction],feed_dict={inputs: np.array([[0.0, 1.0, 0.0, 0.0]])})
print(best)
Which prints
[array([[ 0.49810624, 0.24845563, 0.25343812]], dtype=float32)]
Your current network does (logistic) regression, not really classification: given an input x, it tries to evaluate f(x) (where f(x) = x1 XOR x2 here, but the network does not know that before training), which is regression. To do so, it learns a function f1(x) and tries to make it as close to f(x) as possible on all your training samples. [[ 0.61094815]] is simply the value of f1([[0.0, 1.0]]). In this setting there is no such thing as a "probability of being in a class", since there are no classes; it is only you, the user, choosing to interpret f1(x) as the probability of the output being 1. Since you have only 2 classes, that tells you the probability of the other class is 1 - 0.61094815 (that is, you are doing classification with the output of the network, but the network is not really trained for that). Used this way, it is a (widely used) trick to perform classification, but it only works if you have 2 classes.
A real network for classification would be built a bit differently: your logits would be of shape (batch_size, number_of_classes), so (1, 2) in your case; you apply a softmax on them, and then the prediction is argmax(softmax), with probability max(softmax). You can also get the probability of each output according to the network: probability(class i) = softmax[i]. Here the network is really trained to learn the probability of x being in each class.
I'm sorry if my explanation is obscure, or if the difference between regressing onto [0, 1] and classification seems philosophical in a two-class setting, but if you add more classes you'll probably see what I mean.
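For illustration, a minimal sketch of that classification setup (with hypothetical layer sizes, and sparse_softmax_cross_entropy_with_logits instead of the squared error used above):

import tensorflow as tf

NUM_CLASSES = 2
HIDDEN_UNITS = 4

inputs = tf.placeholder(tf.float32, shape=[None, 2])
labels = tf.placeholder(tf.int64, shape=[None])                   # class indices (0 or 1)

weights_1 = tf.Variable(tf.truncated_normal([2, HIDDEN_UNITS]))
biases_1 = tf.Variable(tf.zeros([HIDDEN_UNITS]))
hidden = tf.nn.relu(tf.matmul(inputs, weights_1) + biases_1)

weights_2 = tf.Variable(tf.truncated_normal([HIDDEN_UNITS, NUM_CLASSES]))
biases_2 = tf.Variable(tf.zeros([NUM_CLASSES]))
logits = tf.matmul(hidden, weights_2) + biases_2                  # shape (batch_size, NUM_CLASSES)

probabilities = tf.nn.softmax(logits)                             # probability(class i) = softmax[i]
prediction = tf.argmax(probabilities, axis=1)                     # most probable class

# the loss takes the raw logits, not the softmax output
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)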
EDIT
Answers to your two updates:
In your training samples, the labels (training_outputs) must be probability distributions, i.e. they must sum to 1 for each sample (99% of the time they are of the form (1, 0, 0), (0, 1, 0) or (0, 0, 1)), so your first output [0.0, 0.0, 0.0] is not valid. If you want to learn XOR on the first two inputs, then the first output should be the same as the last: [0.0, 0.0, 1.0].
prediction=tf.argmax(logits,1) returning [array([0])] is completely normal: logits contains your probabilities, and prediction is the class with the biggest probability, which in your case is class 0. In your training set, [0.0, 1.0, 0.0, 0.0] is associated with the output [1.0, 0.0, 0.0], i.e. it is of class 0 with probability 1 and of the other classes with probability 0. After enough training, print(best) with prediction=tf.argmax(logits,1) on the input [1.0, 1.0, 0.0, 0.0] should give you [array([2])], 2 being the index of the class for this input in your training set.
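Concretely, the first point would mean changing the labels along these lines (my reading of the suggestion above):

# XOR on the first two inputs: class 0 = "XOR is 1", class 2 = "XOR is 0"
training_inputs = [[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]]
training_outputs = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]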
I expected the gradient for tf.sign() in TensorFlow to be equal to 0 or None. However, when I examined the gradients, I found that they were equal to very small numbers (e.g. 1.86264515e-09). Why is that?
(If you are curious as to why I even want to know this, it is because I want to implement the "straight-through estimator" described here, and before overriding the gradient for tf.sign(), I wanted to check that the default behavior was in fact what I was expecting.)
EDIT: Here is some code which reproduces the issue. The model is just the linear regression model from the introduction to TensorFlow, except that I use y = sign(W) * x + b instead of y = W * x + b.
import tensorflow as tf
import numpy as np
def gvdebug(g, v):
    g2 = tf.zeros_like(g, dtype=tf.float32)
    v2 = tf.zeros_like(v, dtype=tf.float32)
    g2 = g
    v2 = v
    return g2, v2
# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = tf.sign(W) * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
grads_and_vars = optimizer.compute_gradients(loss)
gv2 = [gvdebug(gv[0], gv[1]) for gv in grads_and_vars]
apply_grads = optimizer.apply_gradients(gv2)
# Before starting, initialize the variables. We will 'run' this first.
init = tf.initialize_all_variables()
# Launch the graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.01)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)
# Fit the line.
for step in range(201):
    sess.run(apply_grads)
    if (step % 20 == 0) or ((step-1) % 20 == 0):
        print("")
        print(sess.run(gv2[0][1]))  # the variable
        print(sess.run(gv2[0][0]))  # the gradient
        print("")
        print(step, sess.run(W), sess.run(b))
# Learns best fit is W: [0.1], b: [0.3]
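For completeness, the straight-through estimator mentioned in the question is commonly implemented by mapping the Sign op's gradient onto Identity via gradient_override_map; a minimal sketch along those lines:

import tensorflow as tf
import numpy as np

x_data = np.random.rand(100).astype(np.float32)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))

g = tf.get_default_graph()
with g.gradient_override_map({"Sign": "Identity"}):
    # forward pass uses sign(W); backward pass treats sign as the identity,
    # so gradients flow straight through to W
    y = tf.sign(W) * x_data + b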