I am new to TensorFlow and neural networks.
In the code below, everything works fine.
However, as soon as I apply the tf.sigmoid function by uncommenting #X21 = tf.sigmoid(X21), I get weird results where all my predictions are equal to 1. Any reason why this might be happening?
Note that I am predicting house prices, which are in the thousands.
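(For context: tf.sigmoid squashes any real input into the interval (0, 1), so a sigmoid on the output layer can never produce values in the thousands. A quick check, independent of the model below:)
with tf.Session() as s:
    print(s.run(tf.sigmoid(tf.constant([-10.0, 0.0, 10.0, 1000.0]))))
    # prints roughly [0.0000454, 0.5, 0.99995, 1.0]; large inputs saturate at 1.0 in float32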
# Set model weights
b1_1 = tf.cast(tf.Variable(np.random.randn(), name="bias"), tf.float64)
b1_2 = tf.cast(tf.Variable(np.random.randn(), name="bias"), tf.float64)
b2_1 = tf.cast(tf.Variable(np.random.randn(), name="bias"), tf.float64)
W1_1 = tf.cast(tf.Variable(np.random.randn(train_X.shape[1], 1), name="weight"), tf.float64)
W1_2 = tf.cast(tf.Variable(np.random.randn(train_X.shape[1], 1), name="weight"), tf.float64)
W2_1 = tf.cast(tf.Variable(np.random.randn(2, 1), name="weight"), tf.float64)
X11 = tf.add(tf.matmul(train_X, W1_1), b1_1)
X12 = tf.add(tf.matmul(train_X, W1_2), b1_2)
X21 = tf.add(tf.matmul(tf.squeeze(tf.transpose(tf.stack((X11, X12)))), W2_1), b2_1)  # [47, 2] x [2, 1]; equivalent to tf.matmul(tf.concat([X11, X12], 1), W2_1) + b2_1
#X21 = tf.sigmoid(X21)
# placeholder for a tensor that will always be fed
Y = tf.placeholder('float64', shape = [47, 1])
cost = (tf.reduce_sum(tf.pow(X21-Y, 2))/(2*n_samples))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        #for (x, y) in zip(train_X, train_Y):
        sess.run(optimizer, feed_dict={Y: train_Y})
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={Y: train_Y})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={Y: train_Y})
    print("Training cost=", training_cost)
    line = sess.run(X21, feed_dict={Y: train_Y})
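(For a regression target in the thousands, the usual pattern is to keep the output layer linear and put the squashing nonlinearity on the hidden layer instead; a minimal sketch under the variable names above, not the original poster's code:)
X11 = tf.sigmoid(tf.add(tf.matmul(train_X, W1_1), b1_1))  # nonlinearity on the hidden units
X12 = tf.sigmoid(tf.add(tf.matmul(train_X, W1_2), b1_2))
X21 = tf.add(tf.matmul(tf.concat([X11, X12], 1), W2_1), b2_1)  # linear output layer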
I have a TensorFlow (TF) model that I'd like to restore and retrain some of its parameters. I know that tf.get_operation_by_name('name of the optimizer') retrieves the original optimizer that was used to train the model before it was stored. However, I don't know how to pass the new list of TF variables that I want the optimizer to retrain!
This example helps illustrate what I want to do:
learning_rate = 0.0001
training_iters = 60000
batch_size = 64
display_step = 20
ImVecDim = 784  # The number of elements in an image vector (flattening a 28x28 2D image)
NumOfClasses = 10
dropout = 0.8
with tf.Session() as sess:
    LoadMod = tf.train.import_meta_graph('simple_mnist.ckpt.meta')  # This object loads the model
    LoadMod.restore(sess, tf.train.latest_checkpoint('./'))  # Loading weights, biases and other stuff into the model
    g = tf.get_default_graph()
    # Variables to be retrained:
    wc2 = g.get_tensor_by_name('wc2:0')
    bc2 = g.get_tensor_by_name('bc2:0')
    wc3 = g.get_tensor_by_name('wc3:0')
    bc3 = g.get_tensor_by_name('bc3:0')
    wd1 = g.get_tensor_by_name('wd1:0')
    bd1 = g.get_tensor_by_name('bd1:0')
    wd2 = g.get_tensor_by_name('wd2:0')
    bd2 = g.get_tensor_by_name('bd2:0')
    out_w = g.get_tensor_by_name('out_w:0')
    out_b = g.get_tensor_by_name('out_b:0')
    VarToTrain = [wc2, wc3, wd1, wd2, out_w, bc2, bc3, bd1, bd2, out_b]
    # Retrieving the optimizer:
    Opt = tf.get_operation_by_name('Adam')
    # Retraining:
    X = g.get_tensor_by_name('ImageIn:0')
    Y = g.get_tensor_by_name('LabelIn:0')
    KP = g.get_tensor_by_name('KeepProb:0')
    accuracy = g.get_tensor_by_name('NetAccuracy:0')
    cost = g.get_tensor_by_name('loss:0')
    step = 1
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        #########################################################################
        # Here I want to pass (VarToTrain) to the optimizer (Opt)!              #
        #########################################################################
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
            loss = sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
                loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
    feed_dict = {X: mnist.test.images[:256], Y: mnist.test.labels[:256], KP: 1.0}
    ModelAccuracy = sess.run(accuracy, feed_dict)
    print('Retraining finished' + ', Test Accuracy = %f' % ModelAccuracy)
Well, I have not figured out a way to do exactly what I want, but I've found a way around the problem: instead of passing a new list of variables to the original optimizer, I defined a new optimizer and passed those variables to its minimize() method. The code is given below:
learning_rate = 0.0001
training_iters = 60000
batch_size = 64
display_step = 20
ImVecDim = 784  # The number of elements in an image vector (flattening a 28x28 2D image)
NumOfClasses = 10
dropout = 0.8
with tf.Session() as sess:
    LoadMod = tf.train.import_meta_graph('simple_mnist.ckpt.meta')  # This object loads the model
    LoadMod.restore(sess, tf.train.latest_checkpoint('./'))  # Loading weights, biases and other stuff into the model
    g = tf.get_default_graph()
    # Retraining:
    X = g.get_tensor_by_name('ImageIn:0')
    Y = g.get_tensor_by_name('LabelIn:0')
    KP = g.get_tensor_by_name('KeepProb:0')
    accuracy = g.get_tensor_by_name('NetAccuracy:0')
    cost = g.get_tensor_by_name('loss:0')
    ######################## Producing a list and defining a new optimizer ####################################
    VarToTrain = g.get_collection_ref('trainable_variables')
    del VarToTrain[0]  # Removing a variable from the list
    del VarToTrain[5]  # Removing another variable from the list
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).\
        minimize(cost, var_list=VarToTrain)
    ############################################################################################################
    step = 1
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, KP: dropout})
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
            loss = sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
                loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
    feed_dict = {X: mnist.test.images[:256], Y: mnist.test.labels[:256], KP: 1.0}
    ModelAccuracy = sess.run(accuracy, feed_dict)
    print('Retraining finished' + ', Test Accuracy = %f' % ModelAccuracy)
The code above did the job, but it has some issues. First, for some reason, I keep getting error messages every time I define an optimizer of the same type as the original one, tf.train.AdamOptimizer() (most likely because Adam creates new slot variables that are not covered by the restored checkpoint and therefore are never initialized). The only optimizer I can define without TF throwing error messages is tf.train.GradientDescentOptimizer(), which creates no extra variables. The other issue with this solution is its inconvenience: to produce the list of variables I want to train, I first have to get all trainable variables using VarToTrain = g.get_collection_ref('trainable_variables'), print them out, memorize the positions of the unwanted variables in the list, and then delete them one by one with del. There must be a more elegant way of doing that; what I have done works fine only for small networks.
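(A less manual way to build the list, sketched under the assumption that the variables keep the names used above: filter tf.trainable_variables() by name instead of deleting entries by position. If an Adam-style optimizer is used here, note that its freshly created slot variables would additionally need to be initialized before training.)
names_to_train = ['wc2', 'bc2', 'wc3', 'bc3', 'wd1', 'bd1', 'wd2', 'bd2', 'out_w', 'out_b']
# keep only the trainable variables whose graph names (minus the ':0' suffix) match:
VarToTrain = [v for v in tf.trainable_variables() if v.name.split(':')[0] in names_to_train]
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost, var_list=VarToTrain)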
I get an error like this:
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[10,9] labels_size=[7040,9]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](Reshape, Reshape_1)]]
But I can't find the tensor that causes this error... I think it comes from a size mismatch...
My input size is batch_size * n_steps * n_input,
so it will be 10*704*100, and I want to make the output
batch_size * n_steps * n_classes => it will be 10*704*9, using a bidirectional RNN.
How should I change this code to fix the error?
batch_size means the number of sequences, like this:
data 1 : ABCABCABCAAADDD...
...
data 10 : ABCCCCABCDBBAA...
And
n_steps means the length of each sequence (the data was padded with 'O' to fix the length of each sequence): 704
And
n_input means the length of the vector that encodes each letter of each sequence, like this:
A - [1, 2, 1, -1, ..., -1]
And the output of the learning should be like this:
output of data 1 : XYZYXYZYYXY ...
...
output of data 10 : ZXYYRZYZZ ...
Each output letter is affected by the surrounding letters and their order in the input.
learning_rate = 0.001
training_iters = 100000
batch_size = 10
display_step = 10
# Network Parameters
n_input = 100
n_steps = 704 # timesteps
n_hidden = 50 # hidden layer num of features
n_classes = 9
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_steps, n_classes])
weights = {
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
    x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # Old TensorFlow version only returns outputs, not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = BiRNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    while step * batch_size < training_iters:
        batch_x, batch_y = next_batch(batch_size, r_big_d, y_r_big_d)
        #batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    test_x, test_y = next_batch(batch_size, v_big_d, y_v_big_d)
    print("Testing Accuracy:", \
          sess.run(accuracy, feed_dict={x: test_x, y: test_y}))
The first return value of static_bidirectional_rnn is a list of tensors, one for each RNN step. By using only the last one in your tf.matmul you're losing all the rest. Instead, stack them into a single tensor of the appropriate shape, reshape for the matmul, then reshape back:
outputs = tf.stack(outputs, axis=1)                              # [batch_size, n_steps, 2*n_hidden]
outputs = tf.reshape(outputs, (batch_size*n_steps, 2*n_hidden))  # flatten steps for the matmul; note 2*n_hidden, since forward and backward outputs are concatenated
outputs = tf.matmul(outputs, weights['out']) + biases['out']
outputs = tf.reshape(outputs, (batch_size, n_steps, n_classes))  # back to per-step logits
Alternatively, you could use tf.einsum:
outputs = tf.stack(outputs, axis=1)
outputs = tf.einsum('ijk,kl->ijl', outputs, weights['out']) + biases['out']
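(A follow-up adjustment, assuming the rest of the question's evaluation code is kept: with pred now shaped [batch_size, n_steps, n_classes], the accuracy comparison should take the argmax over the class axis:)
correct_pred = tf.equal(tf.argmax(pred, 2), tf.argmax(y, 2))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))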
I am trying to train a neural network on the Poker Hand dataset (10 classes). I have tried to adapt the MNIST example to fit it. However, the accuracy is always about 50%, which is bothersome. How can I improve the accuracy?
def init_weights(shape):
    """ Weight initialization """
    weights = tf.random_normal(shape, stddev=0.1)
    return tf.Variable(weights)

def forwardprop(X, weights, biases):
    """
    Forward-propagation.
    IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
    """
    h = tf.nn.sigmoid(tf.add(tf.matmul(X, weights['w_1']), biases['b_1']))  # The \sigma function
    yhat = tf.add(tf.matmul(h, weights['w_2']), biases['b_2'])  # The \varphi function
    return yhat

def get_data(filename, targetname="target", idname="", test_size=0.10, random_state=200):
    # read data from csv
    df = pd.read_csv(filename)
    data = pd.DataFrame(df.ix[:, df.columns != targetname])
    if idname != str(""):
        df = df.drop(idname, 1)
        data = pd.DataFrame(df.ix[:, df.columns != targetname])
    data = pd.get_dummies(data)
    all_X = data.as_matrix()
    target = df[targetname]
    target = pd.factorize(target)[0]
    # Convert target into one-hot vectors
    num_labels = len(np.unique(target))
    all_Y = np.eye(num_labels)[target]  # One-liner trick!
    return train_test_split(all_X, all_Y, test_size=test_size, random_state=random_state)
def main():
    start_time = time.time()
    train_X, test_X, train_y, test_y = get_data(filename='./data/poker-train.csv', targetname="class")
    # customized for this dataset (or any large dataset); must be chosen per the data, need to find some generic way
    # for small datasets: batch size can be 1 (for more accuracy)
    # for large ones: somewhere around 50-80; batch size 1 is very slow, so 50-80 trades accuracy for time
    learning_rate = 0.01
    training_epochs = 100
    batch_size = 1
    # Layer sizes
    x_size = train_X.shape[1]  # Number of input nodes
    h_size = train_X.shape[1]  # Number of hidden nodes
    y_size = train_y.shape[1]  # Number of outcomes
    # Symbols
    X = tf.placeholder("float", shape=[None, x_size])
    y = tf.placeholder("float", shape=[None, y_size])
    # Weight initializations
    weights = {
        'w_1': init_weights((x_size, h_size)),
        'w_2': init_weights((h_size, y_size))
    }
    # Bias initializations
    biases = {
        'b_1': init_weights([h_size]),
        'b_2': init_weights([y_size])
    }
    # Forward propagation
    yhat = forwardprop(X, weights, biases)
    predict = tf.argmax(yhat, axis=1)
    # Backward propagation
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
    updates = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Run SGD
    init = tf.global_variables_initializer()
    total_batch = int(train_X.shape[0]/batch_size)
    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(training_epochs):
            beg_i = 0
            # Loop over all batches
            for i in range(total_batch):
                end_i = beg_i + batch_size
                if end_i > train_X.shape[0]:
                    end_i = train_X.shape[0]
                batch_x, batch_y = train_X[beg_i:end_i, :], train_y[beg_i:end_i, :]
                beg_i = beg_i + batch_size
                sess.run(updates, feed_dict={X: batch_x, y: batch_y})
            train_accuracy = np.mean(np.argmax(train_y, axis=1) == sess.run(predict, feed_dict={X: train_X, y: train_y}))
            test_accuracy = np.mean(np.argmax(test_y, axis=1) == sess.run(predict, feed_dict={X: test_X, y: test_y}))
            print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
                  % (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))
        # # Test model
        # correct_prediction = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1))
        # # Calculate accuracy
        # accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # print("Accuracy:", accuracy.eval({X: test_X, y: test_y}))
    print("Total time of execution: ", time.time() - start_time)

if __name__ == '__main__':
    main()
The output is:
Epoch = 1, train accuracy = 50.13%, test accuracy = 50.20%
...
Epoch = 100, train accuracy = 55.77%, test accuracy = 55.30%
I then modified the above as follows:
batch_size = 50  # was 1
training_epochs = int(train_X.shape[0]/batch_size)
# Layer sizes
x_size = train_X.shape[1]  # Number of input nodes
h_size = 100  # was train_X.shape[1]; number of hidden nodes
y_size = train_y.shape[1]  # Number of outcomes
and now get:
Epoch = 1, train accuracy = 49.98%, test accuracy = 50.11%
Epoch = 500, train accuracy = 90.90%, test accuracy = 90.78%
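(A further tweak worth trying, though it is an assumption of mine and not part of the runs above: shuffle the training set at the start of each epoch so consecutive mini-batches are not correlated.)
perm = np.random.permutation(train_X.shape[0])  # a new random order each epoch
train_X, train_y = train_X[perm], train_y[perm]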
I define initial states, but when I print them they are still zero!
Below is my code:
def BiRNN(x, weights, biases):
    # some x shaping
    lstm_fw_cell = rnn_cell.GRUCell(n_hidden)
    lstm_bw_cell = rnn_cell.GRUCell(n_hidden)
    init_state_fw = lstm_fw_cell.zero_state(batch_size, tf.float32)
    init_state_bw = lstm_bw_cell.zero_state(batch_size, tf.float32)
    outputs, fstate, bstate = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                    dtype=tf.float32,
                                                    initial_state_fw=init_state_fw,
                                                    initial_state_bw=init_state_bw)
    return [tf.matmul(outputs[-1], weights['out']) + biases['out'], init_state_fw]
pred = BiRNN(x, weights, biases)
cost = tf.reduce_mean(tf.pow(pred[0] - y, 2))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reaching max iterations
    while step * batch_size < training_iters:
        # Start populating the filename queue.
        batch_x = example3[step % n_samples]
        batch_y = label3[step % n_samples]
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        batch_y = batch_y.reshape((batch_size, n_steps, n_classes))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        w = sess.run(pred[1])
        print(w)
I want to keep the initial states for later use. I have a data set that requires two different initial states, and I want the initial states to be trained separately.
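(A note on why the printed states stay zero: cell.zero_state(batch_size, tf.float32) just builds a constant all-zeros tensor, and nothing in the graph ever writes to it, so fetching it always returns zeros. If the initial states should be learned, one option is to make them variables so the optimizer can update them; a minimal sketch using the names from the code above:)
init_state_fw = tf.Variable(tf.zeros([batch_size, n_hidden]), name='init_state_fw')  # trainable
init_state_bw = tf.Variable(tf.zeros([batch_size, n_hidden]), name='init_state_bw')  # trainable
outputs, fstate, bstate = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                dtype=tf.float32,
                                                initial_state_fw=init_state_fw,
                                                initial_state_bw=init_state_bw)
(For two data regimes that need two different initial states, one could create two such pairs of variables and pass the appropriate pair when building each graph.)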
I am creating a deep, fully connected NN for the MNIST input. I have a function (it takes a placeholder input):
# Create model
def multilayer_perceptron(x, activation_fn, weights, biases, dbg=False):
    layerDatas = OrderedDict()
    # get each layer's data
    prev = x
    for i in range(len(weights)-1):
        weight = weights.items()[i][1]
        bias = biases.items()[i][1]
        var = 'layer_' + str(i+1)
        layerData = tf.add(tf.matmul(prev, weight), bias)
        layerData = activation_fn(layerData)
        prev = layerData
        layerDatas[var] = layerData
    # output layer with linear function, using the last layer's output value
    out_layer = tf.matmul(prev, weights['out']) + biases['out']
    print x.eval()  # debug the data
    return out_layer
which takes multiple layers of weights and biases. I call it from the main program with:
sess = tf.InteractiveSession()  # start a session
print 'Data', n_input, n_classes
print 'Train', train_set_x.shape, train_set_y.shape
(weights, biases) = createWeightsBiases(layers, n_input, n_classes, dbg)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Construct model
pred = multilayer_perceptron(x, activation_fn, weights, biases, dbg)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.initialize_all_variables()
done_looping = False
display_step = 1
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Launch the graph
sess.run(init)
# Training cycle
epochs = 1000
for epoch in range(epochs):
    avg_cost = 0.
    total_batch = int(len(train_set_x)/batch_size)
    print 'Batch', total_batch, batch_size
    # Loop over all batches
    for i in range(total_batch):
        batch_x = train_set_x[i * batch_size: (i + 1) * batch_size]
        batch_y = train_set_y[i * batch_size: (i + 1) * batch_size]
        # Run optimization op (backprop) and cost op (to get loss value)
        _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                      y: batch_y})
        # Compute average loss
        avg_cost += c / total_batch
    # Display logs per epoch step
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch+1), "cost=", \
            "{:.9f}".format(avg_cost)
print "Optimization Finished!"
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print "Accuracy:", accuracy.eval({x: valid_set_x, y: valid_set_y})
When I try to print the tensor in my multilayer_perceptron function, I get a crash with
tensorflow.python.framework.errors.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
I would appreciate help in getting around this.
You can't eval a placeholder. Instead, you can feed the graph a proper value for the placeholder and only then extract its content (which is just the value you fed in).
So, remove the print x.eval()  # debug the data line from the multilayer_perceptron function.
To inspect the value of a placeholder, you have to feed it and then fetch back the value you just fed (side note: this is rarely useful).
If you really want to do this, here's how:
placeholder_value = sess.run(x, feed_dict={x: [1, 2, 3, 4]})
print placeholder_value
It will print the value [1, 2, 3, 4].
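(If the goal is to inspect intermediate tensors during training rather than the placeholder itself, one TF 1.x option is tf.Print, which returns its input unchanged and prints the given tensors as a side effect whenever the graph runs; a minimal sketch against the layer loop in multilayer_perceptron above:)
layerData = tf.add(tf.matmul(prev, weight), bias)
# identity pass-through that logs the first few values of this layer on every run:
layerData = tf.Print(layerData, [layerData], message=var + ' = ', summarize=5)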