When I used TensorFlow to build a character-level RNN, I was confused by the change of the weights in the model. I thought the weights wouldn't update within one batch.
But the kernel (which I think is Wk) was changing, and there were 6 rows in it. So I am confused why it changed and why there are 6. Can I get W and U directly using TensorFlow? Here is my code. Thanks.
import tensorflow as tf
import numpy as np

sess = tf.InteractiveSession()

# One-hot encodings for the characters h, e, l, o
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]

with tf.variable_scope('two_sequances') as scope:
    # One cell RNN input_dim (4) -> output_dim (2). sequence: 5
    hidden_size = 2
    cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
    x_data = np.array([[h, e, l, l, o]], dtype=np.float32)
    print(x_data.shape)
    print(x_data)
    outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)

sess.run(tf.global_variables_initializer())
results, state = sess.run([outputs, _states])

variable_names = [v.name for v in tf.global_variables()]
values = sess.run(variable_names)
for k, v in zip(variable_names, values):
    print(k, v)
And the output of the variables is as follows.
two_sequances/rnn/basic_rnn_cell/kernel:0
[[ 0.6147509 0.6268855 ]
[ 0.34818882 0.8140872 ]
[ 0.4074654 0.011693 ]
[-0.5032909 -0.69920516]
[ 0.62231725 0.18967694]
[ 0.6888749 0.77280706]]
two_sequances/rnn/basic_rnn_cell/bias:0
[0. 0.]
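For what it's worth, as far as I can tell BasicRNNCell stores the input weights W and the recurrent weights U concatenated row-wise in a single kernel of shape (input_dim + hidden_size, hidden_size), here (4 + 2, 2) = (6, 2), which is why the printout above has 6 rows rather than 6 separate kernels. A minimal sketch of slicing them apart (reusing sess from the code above):

kernel_var = [v for v in tf.global_variables() if 'kernel' in v.name][0]
kernel = sess.run(kernel_var)
input_dim = 4
W = kernel[:input_dim, :]   # input-to-hidden weights, shape (4, 2)
U = kernel[input_dim:, :]   # hidden-to-hidden weights, shape (2, 2)
print(W.shape, U.shape)     # (4, 2) (2, 2)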
Suppose I have a tensor A of shape (m, n). I would like to randomly sample k elements (without replacement) from each row, resulting in a tensor B of shape (m, k). How can I do that in TensorFlow?
An example would be:
A: [[1,2,3], [4,5,6], [7,8,9], [10,11,12]]
k: 2
B: [[1,3],[5,6],[9,8],[12,10]]
This is one way to do that:
import tensorflow as tf

with tf.Graph().as_default(), tf.Session() as sess:
    tf.random.set_random_seed(0)
    a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], tf.int32)
    k = tf.constant(2, tf.int32)
    # Transpose, shuffle rows (i.e. the original columns), slice, undo transpose
    aT = tf.transpose(a)
    aT_shuff = tf.random.shuffle(aT)
    aT_shuff_k = aT_shuff[:k]
    result = tf.transpose(aT_shuff_k)
    print(sess.run(result))
    # [[ 3  1]
    #  [ 6  4]
    #  [ 9  7]
    #  [12 10]]
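Note that this shuffles whole columns, so every row ends up with the same k positions picked (as in the printed output above). If each row should be sampled independently, as in the example B, one option is to argsort per-element random noise row by row; a sketch, assuming a TF version where tf.gather supports batch_dims (1.14+):

import tensorflow as tf

with tf.Graph().as_default(), tf.Session() as sess:
    a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], tf.int32)
    k = 2
    # Independent uniform noise per element; argsort of the noise yields a
    # random permutation of the column indices for every row.
    idx = tf.argsort(tf.random.uniform(tf.shape(a)), axis=-1)[:, :k]
    result = tf.gather(a, idx, batch_dims=1)  # gather row by row
    print(sess.run(result))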
As above. I tried these, to no avail:
tf.random.shuffle( (a,b) )
tf.random.shuffle( zip(a,b) )
I used to concatenate them, do the shuffling, then unconcatenate/unpack. But now I'm in a situation where a is a rank-4 tensor while b is 1-D, so there is no way to concatenate them.
I also tried giving the seed argument to the shuffle method so that it reproduces the same shuffling when used twice: failed. I also tried doing the shuffling myself with a randomly shuffled range of numbers, but TF is not as flexible as NumPy at fancy indexing: failed.
What I'm doing now is converting everything back to NumPy, shuffling with sklearn's shuffle, then going back to tensors by recasting. It is a sheer stupid way; this is supposed to happen inside a graph.
You could just shuffle the indices and then use tf.gather() to extract values corresponding to those shuffled indices:
TF2.x (UPDATE)
import tensorflow as tf
import numpy as np
x = tf.convert_to_tensor(np.arange(5))
y = tf.convert_to_tensor(['a', 'b', 'c', 'd', 'e'])
indices = tf.range(start=0, limit=tf.shape(x)[0], dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)
shuffled_x = tf.gather(x, shuffled_indices)
shuffled_y = tf.gather(y, shuffled_indices)
print('before')
print('x', x.numpy())
print('y', y.numpy())
print('after')
print('x', shuffled_x.numpy())
print('y', shuffled_y.numpy())
# before
# x [0 1 2 3 4]
# y [b'a' b'b' b'c' b'd' b'e']
# after
# x [4 0 1 2 3]
# y [b'e' b'a' b'b' b'c' b'd']
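If you are on TF2 anyway, another option is tf.data, where shuffling the zipped dataset keeps x and y paired by construction; a small sketch:

import numpy as np
import tensorflow as tf

x = np.arange(5)
y = np.array(['a', 'b', 'c', 'd', 'e'])
# Shuffle (x, y) pairs together; a buffer at least as large as the
# dataset gives a full uniform shuffle.
ds = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(buffer_size=5)
for xi, yi in ds:
    print(xi.numpy(), yi.numpy())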
TF1.x
import tensorflow as tf
import numpy as np
x = tf.placeholder(tf.float32, (None, 1, 1, 1))
y = tf.placeholder(tf.int32, (None))
indices = tf.range(start=0, limit=tf.shape(x)[0], dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)
shuffled_x = tf.gather(x, shuffled_indices)
shuffled_y = tf.gather(y, shuffled_indices)
Make sure that you compute shuffled_x, shuffled_y in the same session run. Otherwise they might get different index orderings.
# Testing
x_data = np.concatenate([np.zeros((1, 1, 1, 1)),
                         np.ones((1, 1, 1, 1)),
                         2 * np.ones((1, 1, 1, 1))]).astype('float32')
y_data = np.arange(4, 7, 1)

print('Before shuffling:')
print('x:')
print(x_data.squeeze())
print('y:')
print(y_data)

with tf.Session() as sess:
    x_res, y_res = sess.run([shuffled_x, shuffled_y],
                            feed_dict={x: x_data, y: y_data})
    print('After shuffling:')
    print('x:')
    print(x_res.squeeze())
    print('y:')
    print(y_res)
Before shuffling:
x:
[0. 1. 2.]
y:
[4 5 6]
After shuffling:
x:
[1. 2. 0.]
y:
[5 6 4]
I've built a simple recurrent neural net with one hidden layer of 4 nodes. This is my code:
import tensorflow as tf

# hyperparameters
learning_rate = 0.0001
number_of_epochs = 10000

# computation graph
W1 = tf.Variable([[1.0, 1.0, 1.0, 1.0]], dtype=tf.float32, name='W1')
W2 = tf.Variable([[1.0], [1.0], [1.0], [1.0]], dtype=tf.float32, name='W2')
WR = tf.Variable([[1.0, 1.0, 1.0, 1.0]], dtype=tf.float32, name='WR')
# b = tf.Variable([[0], [0], [0], [0]], dtype=tf.float32)

prev_val = [[0.0]]
X = tf.placeholder(tf.float32, [None, None], name='X')
labels = tf.placeholder(tf.float32, [None, 1], name='labels')

sess = tf.Session()
sess.run(tf.initialize_all_variables())

z = tf.matmul(X, W1) + tf.matmul(prev_val, WR)  # - b
prev_val = z
predict = tf.matmul(z, W2)
error = tf.reduce_mean((labels - predict)**2)
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(error)

time_series = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
lbsx = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]

for i in range(number_of_epochs):
    for j in range(len(time_series)):
        curr_X = time_series[j]
        lbs = lbsx[j]
        sess.run(train, feed_dict={X: [[curr_X]], labels: [[lbs]]})

print(sess.run(predict, feed_dict={X: [[0]]}))
print(sess.run(predict, feed_dict={X: [[1]]}))
I'm getting output:
[[ 0.]]
[[ 3.12420416e-05]]
With input 1, it should output 0 and vice versa. I'm also confused regarding the 'previous value'. Should it be a placeholder? I'd really appreciate your efforts to fix the code.
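Regarding the previous value: prev_val = z only rebinds the Python name once while the graph is built, so no state is actually carried between steps. A placeholder is indeed one way to feed the state back in; a minimal sketch (the names prev_state and state and the hidden size of 4 are illustrative, not taken from the code above):

import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 1], name='X')
prev_state = tf.placeholder(tf.float32, [None, 4], name='prev_state')
W1 = tf.Variable(tf.ones([1, 4]))
WR = tf.Variable(tf.ones([4, 4]))
# Hidden state depends on the current input and the fed-back state.
z = tf.tanh(tf.matmul(X, W1) + tf.matmul(prev_state, WR))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    state = [[0.0, 0.0, 0.0, 0.0]]  # initial state
    for x_t in [1, 0, 1, 0]:
        # Feed the state computed at the previous step back into the graph.
        state = sess.run(z, feed_dict={X: [[x_t]], prev_state: state})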
I would like to achieve something similar to this:
https://rootpy.github.io/root_numpy/_images/plot_multiclass_1.png
What would be the most elegant solution? Get the weights, biases, activation function, and data and plot them with some other tool, or does TensorFlow have support for that?
As far as I know, TensorFlow does not directly support plotting decision boundaries.
It is certainly not the most elegant solution, but you can create a grid, classify each point of the grid, and then plot it. For example:
#!/usr/bin/env python
"""
Solve the XOR problem with Tensorflow.
The XOR problem is a two-class classification problem. You only have four
datapoints, all of which are given during training time. Each datapoint has
two features:
x o
o x
As you can see, the classifier has to learn a non-linear transformation of
the features to find a propper decision boundary.
"""
__author__ = "Martin Thoma"
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# The training data
XOR_X = [[0, 0], [0, 1], [1, 0], [1, 1]] # Features
XOR_Y = [[0], [1], [1], [0]] # Class labels
XOR_Y = [[1, 0], [0, 1], [0, 1], [1, 0]] # Target values
assert len(XOR_X) == len(XOR_Y) # sanity check
# The network
nb_classes = 2
input_ = tf.placeholder(tf.float32,
                        shape=[None, len(XOR_X[0])],
                        name="input")
target = tf.placeholder(tf.float32,
                        shape=[None, nb_classes],
                        name="output")
nb_hidden_nodes = 2
# enc = tf.one_hot([0, 1], 2)
w1 = tf.Variable(tf.random_uniform([2, nb_hidden_nodes], -1, 1),
                 name="Weights1")
w2 = tf.Variable(tf.random_uniform([nb_hidden_nodes, nb_classes], -1, 1),
                 name="Weights2")
b1 = tf.Variable(tf.zeros([nb_hidden_nodes]), name="Biases1")
b2 = tf.Variable(tf.zeros([nb_classes]), name="Biases2")
activation2 = tf.sigmoid(tf.matmul(input_, w1) + b1)
hypothesis = tf.nn.softmax(tf.matmul(activation2, w2) + b2)
cross_entropy = -tf.reduce_sum(target * tf.log(hypothesis))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
# Start training
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    for i in range(100000):
        sess.run(train_step, feed_dict={input_: XOR_X, target: XOR_Y})
        if i % 10000 == 0:
            print('Epoch ', i)
            print('Hypothesis ', sess.run(hypothesis,
                                          feed_dict={input_: XOR_X,
                                                     target: XOR_Y}))
            print('w1 ', sess.run(w1))
            print('b1 ', sess.run(b1))
            print('w2 ', sess.run(w2))
            print('b2 ', sess.run(b2))
            print('cost (ce)', sess.run(cross_entropy,
                                        feed_dict={input_: XOR_X,
                                                   target: XOR_Y}))

    # Visualize classification boundary
    xs = np.linspace(-5, 5)
    ys = np.linspace(-5, 5)
    pred_classes = []
    for x in xs:
        for y in ys:
            pred_class = sess.run(hypothesis,
                                  feed_dict={input_: [[x, y]]})
            pred_classes.append((x, y, pred_class.argmax()))
    xs_p, ys_p = [], []
    xs_n, ys_n = [], []
    for x, y, c in pred_classes:
        if c == 0:
            xs_n.append(x)
            ys_n.append(y)
        else:
            xs_p.append(x)
            ys_p.append(y)
    plt.plot(xs_p, ys_p, 'ro', xs_n, ys_n, 'bo')
    plt.show()
which gives a plot of the grid points colored by their predicted class, i.e. the learned decision boundary.
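For a smoother picture, one variation is to evaluate the whole grid in a single sess.run and draw filled contours; a sketch, assuming it is placed inside the with tf.Session() block above while sess, hypothesis, and input_ are still live:

import numpy as np
import matplotlib.pyplot as plt

xx, yy = np.meshgrid(np.linspace(-5, 5, 100), np.linspace(-5, 5, 100))
grid = np.c_[xx.ravel(), yy.ravel()]            # (10000, 2) grid points
probs = sess.run(hypothesis, feed_dict={input_: grid})
zz = probs.argmax(axis=1).reshape(xx.shape)     # predicted class per point
plt.contourf(xx, yy, zz, alpha=0.5)
plt.show()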
I have two embedding tensors, A and B, which look like
[
[1,1,1],
[1,1,1]
]
and
[
[0,0,0],
[1,1,1]
]
What I want to do is calculate the L2 distance d(A, B) row-wise.
First I did a tf.square(tf.sub(lhs, rhs)) to get
[
[1,1,1],
[0,0,0]
]
and then I want to do a row-wise reduce which returns
[
3,
0
]
but tf.reduce_sum does not let me reduce by row. Any input would be appreciated. Thanks.
Add the reduction_indices argument with a value of 1, e.g.:
tf.reduce_sum( tf.square( tf.sub( lhs, rhs) ), 1 )
That should produce the result you're looking for. Here is the documentation on reduce_sum().
According to the TensorFlow documentation, reduce_sum takes the following arguments:
tf.reduce_sum(input_tensor, axis=None, keep_dims=False, name=None, reduction_indices=None)
reduction_indices has been deprecated, so it is better to use axis instead. If axis is not set, all dimensions are reduced.
As an example, this is taken from the documentation:
# 'x' is [[1, 1, 1],
#         [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
The above requirement can be written in this manner:
import numpy as np
import tensorflow as tf

a = np.array([[1, 7, 1], [1, 1, 1]])
b = np.array([[0, 0, 0], [1, 1, 1]])

xtr = tf.placeholder("float", [None, 3])
xte = tf.placeholder("float", [None, 3])

# Row-wise sum of squared differences
pred = tf.reduce_sum(tf.square(tf.subtract(xtr, xte)), 1)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    nn_index = sess.run(pred, feed_dict={xtr: a, xte: b})
    print(nn_index)
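This prints the row-wise sums of squared differences; with the arrays above that is [51.  0.] (1² + 7² + 1² = 51 for the first row, 0 for the second).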