Issue with forget_bias - TensorFlow

I'm going to write a BasicLSTMCell in C++, and I need to check that it works correctly.
I used tf.nn.rnn_cell.BasicLSTMCell to implement an LSTM with 4 units, and I set
forget_bias to 1. Then I checked the LSTM's bias using this code:
import tensorflow as tf
import numpy as np

Feed = tf.placeholder(tf.float32, shape=[1, 2, 4])  # assumed: matches the (1, 2, 4) array fed below

with tf.variable_scope("LSTM"):
    Cell = tf.nn.rnn_cell.BasicLSTMCell(4, forget_bias=1, state_is_tuple=True)
Sessin = tf.Session()
state = Cell.zero_state(1, dtype=tf.float32)
with tf.variable_scope("Ut_def"):
    out, D = tf.nn.dynamic_rnn(
        cell=Cell, inputs=Feed,
        initial_state=state,
        time_major=False)
Sessin.run(tf.global_variables_initializer())
#Saver.save(Sessin, "./123/Var", global_step=1)
out, D = Sessin.run([out, D], feed_dict={Feed: np.arange(8).reshape(1, 2, 4)})
tf.train.Saver().save(Sessin, "./123/Var", global_step=1)
trainable_vars_dict = {}
for key in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
    trainable_vars_dict[key.name] = Sessin.run(key)
    # Checking the names of the keys
    print(key.name)
lstm_weight_vals = trainable_vars_dict["Ut_def/RNN/BasicLSTMCell/Linear/Matrix:0"]
B = trainable_vars_dict["Ut_def/RNN/BasicLSTMCell/Linear/Bias:0"]
print(B)
But I found that the biases were all zeros no matter how I changed forget_bias.
Does anyone know what is going on?
To figure out how the LSTM works, I tried to reproduce the same result using just the weights and bias extracted from TensorFlow, but the results are definitely not equivalent.
w_i, w_C, w_f, w_o = np.split(lstm_weight_vals, 4, axis=1)
w_xi = w_i[:4, :]
w_hi = w_i[4:, :]
w_xC = w_C[:4, :]
w_hC = w_C[4:, :]
w_xf = w_f[:4, :]
w_hf = w_f[4:, :]
w_xo = w_o[4:, :]
w_ho = w_o[4:, :]
Input=tf.range(4,dtype=tf.float32)
Input=tf.reshape(Input,shape=[1,4])
i=tf.sigmoid(tf.matmul(tf.zeros(shape=[1,4]),w_xi)+tf.matmul(Input,w_hi))
o=tf.sigmoid(tf.matmul(tf.zeros(shape=[1,4]),w_xo)+tf.matmul(Input,w_ho))
g=tf.tanh(tf.matmul(tf.zeros(shape=[1,4]),w_xC)+tf.matmul(Input,w_hC))
f=tf.sigmoid(tf.matmul(tf.zeros(shape=[1,4]),w_xf)+tf.matmul(Input,w_hf))
Cstate=tf.zeros(shape=[1,4])*f+i*g
Hstate=tf.tanh(Cstate)*o
Input=Input+4
i=tf.sigmoid(tf.matmul(Cstate,w_xi)+tf.matmul(Input,w_hi))
o=tf.sigmoid(tf.matmul(Cstate,w_xo)+tf.matmul(Input,w_ho))
g=tf.tanh(tf.matmul(Cstate,w_xC)+tf.matmul(Input,w_hC))
f=tf.sigmoid(tf.matmul(Cstate,w_xf)+tf.matmul(Input,w_hf))
Cstate=Cstate*f+i*g
Hstate=tf.tanh(Cstate)*o

I have found the wrong code. The code should look like this:
i=tf.sigmoid(tf.matmul(Hstate,w_xi)+tf.matmul(Input,w_hi))
o=tf.sigmoid(tf.matmul(Hstate,w_xo)+tf.matmul(Input,w_ho))
g=tf.tanh(tf.matmul(Hstate,w_xC)+tf.matmul(Input,w_hC))
f=tf.sigmoid(tf.matmul(Hstate,w_xf)+tf.matmul(Input,w_hf)+1)
It should be Hstate instead of Cstate.
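For reference, here is a minimal NumPy sketch (my own illustration, assuming the standard BasicLSTMCell gate order i, g, f, o) of a single LSTM step using the extracted weights. It also shows why the stored Bias variable stays at zero: forget_bias is added inside the forget gate at compute time rather than being stored in the bias variable.
import numpy as np

def lstm_step(x, h_prev, c_prev, w_x, w_h, b, forget_bias=1.0):
    # x: [1, n_in], h_prev/c_prev: [1, n_units]
    # w_x: [n_in, 4*n_units], w_h: [n_units, 4*n_units], b: [4*n_units]
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    gates = x.dot(w_x) + h_prev.dot(w_h) + b
    i, g, f, o = np.split(gates, 4, axis=1)          # BasicLSTMCell gate order: i, g, f, o
    c = c_prev * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(g)   # forget_bias added here
    h = np.tanh(c) * sigmoid(o)
    return h, c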


Hierarchical Bayesian Linear Regression using PyMC3 is super slow

I am trying to write some code for implementing HBM in the case of logistic regression using the Adult dataset from the UCI repository.
I have already written the code, but sampling is super slow, on the order of 107 s per sample, even for 64 dimensions or features. Am I doing something wrong?
I am attaching the code for reference. Following suggestions, I also rescaled the data to try to speed it up, to no avail.
I appreciate any feedback.
The code is a mixture of what has been written here and here.
# Assumed imports for the snippet below
import numpy as np
import pandas as pd
import pymc3 as pm
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn_pandas import DataFrameMapper

# reloading the dataset, this time keeping the country as a plain column
# for hierarchical modeling instead of converting it into a one-hot vector
adult_df = pd.read_csv('adult.data', header=None, sep=', ')
adult_df.columns = ["Age", "WorkClass", "fnlwgt", "Education", "EducationNum",
                    "MaritalStatus", "Occupation", "Relationship", "Race", "Gender",
                    "CapitalGain", "CapitalLoss", "HoursPerWeek", "NativeCountry", "Income"]
adult_df["Income"] = adult_df["Income"].map({"<=50K": 0, ">50K": 1})
adult_df.drop("CapitalGain", axis=1, inplace=True)
adult_df.drop("CapitalLoss", axis=1, inplace=True)
adult_df.Age = adult_df.Age.astype(float)
adult_df.fnlwgt = adult_df.fnlwgt.astype(float)
adult_df.EducationNum = adult_df.EducationNum.astype(float)
adult_df.HoursPerWeek = adult_df.HoursPerWeek.astype(float)
# dropping native country here!!
adult_df = pd.get_dummies(adult_df, columns=[
    "WorkClass", "Education", "MaritalStatus", "Occupation", "Relationship",
    "Race", "Gender",
])
standard_scaler_cols = ["Age", "fnlwgt", "EducationNum", "HoursPerWeek"]
other_cols = list(set(adult_df.columns) - set(standard_scaler_cols))
mapper = DataFrameMapper(
    [([col], StandardScaler()) for col in standard_scaler_cols] +
    [(col, None) for col in other_cols]
)
le = preprocessing.LabelEncoder()
country_idx = le.fit_transform(adult_df['NativeCountry'])
y_all = adult_df["Income"].values
pd.value_counts(pd.Series(y_all))
adult_df.drop("Income", axis=1, inplace=True)
adult_df.drop("NativeCountry", axis=1, inplace=True)
n_countries = len(set(country_idx))
n_features = len(adult_df.columns)
min_max_scaler = preprocessing.MinMaxScaler()
adult_df = min_max_scaler.fit_transform(adult_df)
# rs is a random seed assumed to be defined elsewhere
X_train, X_test, y_train, y_test, country_idx_train, country_idx_test = train_test_split(
    adult_df, y_all, country_idx, train_size=0.1, test_size=0.25, stratify=y_all, random_state=rs)

with pm.Model() as multilevel_model:
    # Hyperpriors for the per-country coefficients
    mu_theta = pm.MvNormal(name='mu_a', mu=np.zeros(n_features), cov=np.eye(n_features), shape=n_features)
    packed_L_theta = pm.LKJCholeskyCov('packed_L', n=n_features,
                                       eta=2., sd_dist=pm.HalfCauchy.dist(2.5))
    L_theta = pm.expand_packed_triangular(n_features, packed_L_theta)
    theta = pm.MvNormal(mu=mu_theta, name='mu_theta', chol=L_theta, shape=[n_countries, n_features])
    # Hyperpriors for the intercept
    mu_b = pm.StudentT('mu_b', nu=3, mu=0., sd=1.0)
    sigma_b = pm.HalfNormal('sigma_b', sd=1.0)
    b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=[n_countries, 1])
    # Calculate predictions given values for intercept and slope
    yhat = pm.invlogit(b[country_idx_train] + pm.math.dot(theta[country_idx_train], np.asarray(X_train).T))
    # Make predictions fit reality
    y = pm.Binomial('y', n=np.ones(y_train.shape[0]), p=yhat, observed=y_train)
You will probably have more success with PyMC3 questions on our Discourse: https://discourse.pymc.io/ I invite you to move your question there.
The first thing I would check is whether your Theano is compiling against MKL libraries, or maybe even running in Python mode. If you installed things via conda, that should give you MKL; if you're using pip, it might be more difficult. http://deeplearning.net/software/theano/troubleshooting.html#test-blas
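A quick way to check this (a sketch, assuming a standard Theano installation) is to print the BLAS link flags Theano was configured with and run its bundled BLAS benchmark:
import theano

# An empty string here usually means Theano falls back to the slow NumPy/Python BLAS path.
print(theano.config.blas.ldflags)

# The bundled benchmark compares your BLAS timings against reference numbers:
#   python -m theano.misc.check_blas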

How to properly update variables in a while loop in TensorFlow?

Can someone please explain (or point me to the relevant place in the documentation that I've missed) how to properly update a tf.Variable() in a tf.while_loop? I am trying to update variables in the loop that will store some information until the next iteration of the loop using the assign() method. However, this isn't doing anything.
The values of mu_tf and sigma_tf are being updated by the minimizer, while step_mu isn't, so I am obviously doing something wrong, but I don't understand what it is. Specifically, I should say that I know assign() does not do anything until it is executed when the graph is run, so I know that I can do
sess.run(step_mu.assign(mu_tf))
and that will update step_mu, but I want to do this in the loop correctly. I don't understand how to add an assign operation to the body of the loop.
A simplified working example of what I'm doing follows here:
import numpy as np
import tensorflow as tf

mu_true = 0.5
sigma_true = 1.5
n_events = 100000

# Placeholders
X = tf.placeholder(dtype=tf.float32)

# Variables
mu_tf = tf.Variable(initial_value=tf.random_normal(shape=[], mean=0., stddev=0.1,
                                                   dtype=tf.float32),
                    dtype=tf.float32)
sigma_tf = tf.Variable(initial_value=tf.abs(tf.random_normal(shape=[], mean=1., stddev=0.1,
                                                             dtype=tf.float32)),
                       dtype=tf.float32,
                       constraint=lambda x: tf.abs(x))
step_mu = tf.Variable(initial_value=-99999., dtype=tf.float32)
step_loss = tf.Variable(initial_value=-99999., dtype=tf.float32)

# loss function
gaussian_dist = tf.distributions.Normal(loc=mu_tf, scale=sigma_tf)
log_prob = gaussian_dist.log_prob(value=X)
negative_log_likelihood = -1.0 * tf.reduce_sum(log_prob)

# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)

# sample data
x_sample = np.random.normal(loc=mu_true, scale=sigma_true, size=n_events)

# Construct the while loop.
def cond(step):
    return tf.less(step, 10)

def body(step):
    # gradient step
    train_op = optimizer.minimize(loss=negative_log_likelihood)
    # update step parameters
    with tf.control_dependencies([train_op]):
        step_mu.assign(mu_tf)
    return tf.add(step, 1)

loop = tf.while_loop(cond, body, [tf.constant(0)])

# Execute the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step_loss = sess.run(fetches=negative_log_likelihood, feed_dict={X: x_sample})
    print('Before loop:\n')
    print('mu_tf: {}'.format(sess.run(mu_tf)))
    print('sigma_tf: {}'.format(sess.run(sigma_tf)))
    print('step_mu: {}'.format(sess.run(step_mu)))
    print('step_loss: {}\n'.format(step_loss))
    sess.run(fetches=loop, feed_dict={X: x_sample})
    print('After loop:\n')
    print('mu_tf: {}'.format(sess.run(mu_tf)))
    print('sigma_tf: {}'.format(sess.run(sigma_tf)))
    print('step_mu: {}'.format(sess.run(step_mu)))
    print('step_loss: {}'.format(step_loss))
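One commonly suggested pattern (a sketch under the assumption of TF 1.x pruning semantics, not verified against every version): inside a tf.while_loop body, only ops that the returned loop variables depend on are executed, so the assign has to be tied to the returned tensor with tf.control_dependencies, for example:
def body(step):
    train_op = optimizer.minimize(loss=negative_log_likelihood)
    with tf.control_dependencies([train_op]):
        assign_op = step_mu.assign(mu_tf)
    # Make the returned counter depend on the assign so the loop actually runs it.
    with tf.control_dependencies([assign_op]):
        return tf.add(step, 1)

loop = tf.while_loop(cond, body, [tf.constant(0)])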

RNN slow-down phenomenon in TensorFlow

I found a peculiar property of the LSTM cell in TensorFlow (probably not limited to LSTM, but that is the only cell I examined) which, as far as I know, has not been reported.
I don't know whether it actually is a known issue, so I am posting it here on SO. Below is toy code for the problem:
import tensorflow as tf
import numpy as np
import time

def network(input_list):
    input, init_hidden_c, init_hidden_m = input_list
    cell = tf.nn.rnn_cell.BasicLSTMCell(256, state_is_tuple=True)
    init_hidden = tf.nn.rnn_cell.LSTMStateTuple(init_hidden_c, init_hidden_m)
    states, hidden_cm = tf.nn.dynamic_rnn(cell, input, dtype=tf.float32, initial_state=init_hidden)
    net = [v for v in tf.trainable_variables()]
    return states, hidden_cm, net

def action(x, h_c, h_m):
    t0 = time.time()
    outputs, output_h = sess.run([rnn_states[:,-1:,:], rnn_hidden_cm], feed_dict={
        rnn_input: x,
        rnn_init_hidden_c: h_c,
        rnn_init_hidden_m: h_m
    })
    dt = time.time() - t0
    return outputs, output_h, dt

rnn_input = tf.placeholder("float", [None, None, 512])
rnn_init_hidden_c = tf.placeholder("float", [None, 256])
rnn_init_hidden_m = tf.placeholder("float", [None, 256])
rnn_input_list = [rnn_input, rnn_init_hidden_c, rnn_init_hidden_m]
rnn_states, rnn_hidden_cm, rnn_net = network(rnn_input_list)

feed_input = np.random.uniform(low=-1., high=1., size=(1, 1, 512))
feed_init_hidden_c = np.zeros(shape=(1, 256))
feed_init_hidden_m = np.zeros(shape=(1, 256))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(10000):
    _, output_hidden_cm, deltat = action(feed_input, feed_init_hidden_c, feed_init_hidden_m)
    if i % 10 == 0:
        print('Running time: ' + str(deltat))
    (feed_init_hidden_c, feed_init_hidden_m) = output_hidden_cm
    feed_input = np.random.uniform(low=-1., high=1., size=(1, 1, 512))
[Not important] What this code does is generate an output from the 'network()' function, which contains an LSTM where the input's temporal dimension is 1 (so the output's is also 1), and pass the state in and out on every step of running.
[Important] Look at the 'sess.run()' part. For some reason in my real code, I happened to put [:,-1:,:] on 'rnn_states'. What happens then is that the time spent on each 'sess.run()' increases. After some inspection of my own, I found that this slowdown stems from that [:,-1:,:]. I just wanted to get the output at the last time step. If you do 'outputs, output_h = sess.run([rnn_states, rnn_hidden_cm], feed_dict={~' without [:,-1:,:] and take 'last_output = outputs[:,-1:,:]' after the 'sess.run()', the slowdown does not occur.
I do not know why this (seemingly exponential) increase in running time happens with that [:,-1:,:]. Is this an undocumented behaviour of TensorFlow that particularly slows things down (maybe it adds more nodes to the graph on its own)?
Thank you, and I hope this post keeps other users from making the same mistake.
I encountered the same problem, with TensorFlow slowing down for each iteration I ran it, and found this question while trying to debug it. Here's a short description of my situation and how I solved it for future reference. Hopefully it can point someone in the right direction and save them some time.
In my case the problem was mainly that I didn't make use of feed_dict to supply the network state when executing sess.run(). Instead I redeclared outputs, final_state and prediction every iteration. The answer at https://github.com/tensorflow/tensorflow/issues/1439#issuecomment-194405649 made me realize how stupid that was... I was constantly creating new graph nodes in every iteration, making it all slower and slower. The problematic code looked something like this:
# defining the network
lstm_layer = rnn.BasicLSTMCell(num_units, forget_bias=1)
outputs, final_state = rnn.static_rnn(lstm_layer, input, initial_state=rnn_state, dtype='float32')
prediction = tf.nn.softmax(tf.matmul(outputs[-1], out_weights) + out_bias)

for input_data in data_seq:
    # redeclaring, stupid stupid...
    outputs, final_state = rnn.static_rnn(lstm_layer, input, initial_state=rnn_state, dtype='float32')
    prediction = tf.nn.softmax(tf.matmul(outputs[-1], out_weights) + out_bias)
    p, rnn_state = sess.run((prediction, final_state), feed_dict={x: input_data})
The solution was of course to declare the nodes only once at the beginning, and supply the new data with feed_dict. The code went from being fairly slow (> 15 ms in the beginning) and getting slower with every iteration, to executing each iteration in around 1 ms. My new code looks something like this:
# (num_units, n_classes, n_input, data_seq, rnn and sess are assumed to be defined elsewhere)
out_weights = tf.Variable(tf.random_normal([num_units, n_classes]), name="out_weights")
out_bias = tf.Variable(tf.random_normal([n_classes]), name="out_bias")

# placeholder for the network state
state_placeholder = tf.placeholder(tf.float32, [2, 1, num_units])
rnn_state = tf.nn.rnn_cell.LSTMStateTuple(state_placeholder[0], state_placeholder[1])
x = tf.placeholder('float', [None, 1, n_input])
input = tf.unstack(x, 1, 1)

# defining the network
lstm_layer = rnn.BasicLSTMCell(num_units, forget_bias=1)
outputs, final_state = rnn.static_rnn(lstm_layer, input, initial_state=rnn_state, dtype='float32')
prediction = tf.nn.softmax(tf.matmul(outputs[-1], out_weights) + out_bias)

# actual network state, which we input with feed_dict
_rnn_state = tf.nn.rnn_cell.LSTMStateTuple(np.zeros((1, num_units), dtype='float32'),
                                           np.zeros((1, num_units), dtype='float32'))

it = 0
for input_data in data_seq:
    encl_input = [[input_data]]
    p, _rnn_state = sess.run((prediction, final_state),
                             feed_dict={x: encl_input, rnn_state: _rnn_state})
    print("{} - {}".format(it, p))
    it += 1
Moving the declaration out of the for loop also got rid of the problem the OP (sdr2002) had: doing a slice outputs[-1] inside sess.run() in the for loop.
As mentioned above, not slicing the output inside 'sess.run()' is what matters in this case; take the slice afterwards instead:
def action(x, h_c, h_m):
    t0 = time.time()
    outputs, output_h = sess.run([rnn_states, rnn_hidden_cm], feed_dict={
        rnn_input: x,
        rnn_init_hidden_c: h_c,
        rnn_init_hidden_m: h_m
    })
    outputs = outputs[:,-1:,:]
    dt = time.time() - t0
    return outputs, output_h, dt
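A quick way to confirm this kind of slowdown (a small sketch, not from the original answer) is to watch whether the graph keeps growing between sess.run() calls, or to freeze it once it has been built:
import tensorflow as tf

graph = tf.get_default_graph()
print(len(graph.get_operations()))   # note the op count before the loop
# ... run a few iterations of the sess.run() loop ...
print(len(graph.get_operations()))   # a growing count means new ops are being created per call

# Optionally freeze the graph; any later attempt to add ops
# (e.g. slicing a tensor inside sess.run) then raises an error.
graph.finalize()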

Reevaluate dependencies of a while loop

I am trying to understand how while loops work in tensorflow. In particular I have a variable, say x, that I update in the while loop, and then I have some values that depend on x, but when running the while loop those values do not seem to be updated when x changes.
The following code, where I have tried to implement a simple gradient descent optimizer, might illustrate what I mean:
import tensorflow as tf

x = tf.Variable(initial_value=4, dtype=tf.float32, trainable=False)
y = tf.multiply(x, x)
grad = tf.gradients(y, x)

def update_g():
    with tf.control_dependencies(grad):
        return tf.identity(grad[0])

iterations = tf.placeholder(tf.int32)
i = tf.constant(0, dtype=tf.int32)
g = tf.Variable(initial_value=grad[0], dtype=tf.float32, trainable=False)

c = lambda i_loop, x_loop, g_loop: i_loop < iterations
b = lambda i_loop, x_loop, g_loop: [i_loop+1, tf.assign(x, x_loop - 10*g_loop), update_g()]
l = tf.while_loop(c, b, [i, x, g], back_prop=False, parallel_iterations=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    res_g = sess.run(grad)
    res_l = sess.run(l, feed_dict={iterations: 10})
    res_x = sess.run(x)
    print(res_g)
    print(res_l)
    print(res_x)
Running this on tensorflow 1.0 gives this result for me:
[8.0]
[10, -796.0, 8.0]
-796.0
and the issue is that the value of the gradient is not updated as x changes.
I have tried various variations of the above code, but cannot seem to find a version that works. Basically my question is whether the above can be made to work, or whether I need to rethink the approach.
(Maybe I should add that I am not interested in writing a gradient descent optimizer; I just built this to have something simple and understandable to work with.)
With some help from the other answer I managed to get this working. Posting the complete code here as a second answer:
x = tf.constant(4, dtype=tf.float32)
y = tf.multiply(x, x)
grad = tf.gradients(y, x)

def loop_grad(x_loop):
    y2 = tf.multiply(x_loop, x_loop)
    return tf.gradients(y2, x_loop)[0]

iterations = tf.placeholder(tf.int32)
i = tf.constant(0, dtype=tf.int32)

c = lambda i_loop, x_loop: i_loop < iterations
b = lambda i_loop, x_loop: [i_loop+1, x_loop - 0.1*loop_grad(x_loop)]
l = tf.while_loop(c, b, [i, x], back_prop=False, parallel_iterations=1)

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.05)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(tf.global_variables_initializer())
    res_g = sess.run(grad)
    res_l = sess.run(l, feed_dict={iterations: 100000})
    res_x = sess.run(x)
    print(res_g)
    print(res_l)
    print(res_x)
Changing the learning rate compared to the code in the question and increasing the number of iterations gives the output:
[8.0]
[100000, 5.1315068e-38]
4.0
which seems to be working. It runs reasonably fast even with a high iteration count, so there does not seem to be anything really horrible going on with the graph being updated in each iteration of the while loop; that fear was probably one reason why I didn't opt for this approach from the start.
Having tf.Variable objects as loop variables for while loops is not supported, and will behave in weird nondeterministic ways. Always use tf.assign and friends to update the value of a tf.Variable.

How can I make a greyscale copy of a Surface in pygame?

In pygame, I have a surface:
im = pygame.image.load('foo.png').convert_alpha()
im = pygame.transform.scale(im, (64, 64))
How can I get a grayscale copy of the image, or convert the image data to grayscale? I have numpy.
Use a Surfarray, and filter it with numpy or Numeric:
def grayscale(self, img):
    arr = pygame.surfarray.array3d(img)
    # luminosity filter
    avgs = [[(r*0.298 + g*0.587 + b*0.114) for (r, g, b) in col] for col in arr]
    arr = numpy.array([[[avg, avg, avg] for avg in col] for col in avgs])
    return pygame.surfarray.make_surface(arr)
After a lot of research, I came up with this solution, because the answers to this question were too slow for what I wanted to use this feature for:
import time

import numpy as np
import pygame

def greyscale(surface: pygame.Surface):
    start = time.time()  # delete me!
    arr = pygame.surfarray.array3d(surface)
    # calculates the avg of the "rgb" values; this reduces the dim by 1
    mean_arr = np.mean(arr, axis=2)
    # restores the dimension from 2 to 3
    mean_arr3d = mean_arr[..., np.newaxis]
    # repeat the avg value obtained before over axis 2
    new_arr = np.repeat(mean_arr3d[:, :, :], 3, axis=2)
    diff = time.time() - start  # delete me!
    # return the new surface
    return pygame.surfarray.make_surface(new_arr)
I used time.time() to measure the cost of this approach: for an (800, 600, 3) array it takes 0.026769161224365234 s to run.
As you pointed out, here is a variant preserving the luminance:
def greyscale(surface: pygame.Surface):
    arr = pygame.surfarray.pixels3d(surface)
    # Rec. 601 luma weights for r, g, b
    mean_arr = np.dot(arr[:, :, :], [0.299, 0.587, 0.114])
    mean_arr3d = mean_arr[..., np.newaxis]
    new_arr = np.repeat(mean_arr3d[:, :, :], 3, axis=2)
    return pygame.surfarray.make_surface(new_arr)
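Usage with the surface from the question would then look like this (reusing the 'foo.png' example from the original post):
im = pygame.image.load('foo.png').convert_alpha()
im = pygame.transform.scale(im, (64, 64))
grey_im = greyscale(im)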
The easiest way is to iterate over all the pixels in your image and call .get_at(...) and .set_at(...).
This will be pretty slow, so in answer to your implicit suggestion about using NumPy, look at http://www.pygame.org/docs/tut/surfarray/SurfarrayIntro.html. The concepts and most of the code are identical.