Tensorflow Dropout implementation, test accuracy = train accuracy and low, why? - tensorflow

I have tried a dropout implementation in Tensorflow.
I do know that dropout should be declared as a placeholder and that the keep_prob parameter should be different during training and testing. However, I still almost broke my brain trying to find why the accuracy is so low with dropout. With keep_prob = 1, the train accuracy is 99% and the test accuracy 85%; with keep_prob = 0.5, both train and test accuracy are 16%. Any ideas where to look, anyone? Thank you!
def forward_propagation(X, parameters, keep_prob):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    Z1 = tf.add(tf.matmul(W1, X), b1)   # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                 # A1 = relu(Z1)
    A1 = tf.nn.dropout(A1, keep_prob)   # apply dropout
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                 # A2 = relu(Z2)
    A2 = tf.nn.dropout(A2, keep_prob)   # apply dropout
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = np.dot(W3, A2) + b3

    return Z3
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, lambd = 0.03, train_keep_prob = 0.5,
          num_epochs = 800, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
    X_test -- test set, of shape (input size = 12288, number of test examples = 120)
    Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    lambd -- L2 regularization hyperparameter
    train_keep_prob -- probability of keeping a neuron in a hidden layer for the dropout implementation
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()    # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)        # to keep consistent results
    seed = 3                     # to keep consistent results
    (n_x, m) = X_train.shape     # (n_x: input size, m: number of examples in the train set)
    n_y = Y_train.shape[0]       # n_y: output size
    costs = []                   # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y)
    keep_prob = tf.placeholder(tf.float32)

    # Initialize parameters
    parameters = initialize_parameters()

    # Forward propagation: Build the forward propagation in the tensorflow graph
    Z3 = forward_propagation(X, parameters, keep_prob)

    # Cost function: Add cost function to tensorflow graph
    cost = compute_cost(Z3, Y, parameters, lambd)

    # Backpropagation.
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):
            epoch_cost = 0.                            # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost"; the feed_dict should contain a minibatch for (X, Y).
                _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y, keep_prob: train_keep_prob})
                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train, keep_prob: 1.0}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test, keep_prob: 1.0}))

        return parameters

The algorithm is correct. It is just that keep_prob = 0.5 is too low.
Managed to get 87% accuracy on the test set with the following hyperparameters:
learning_rate = 0.00002, lambd = 0.03, train_keep_prob = 0.90, num_epochs = 1500, minibatch_size = 32

In the first case your model was overfitting the data, hence the large difference between the train and test accuracy. Dropout is a regularization technique that reduces the variance of the model by reducing the effect of particular nodes, and hence prevents overfitting. But keeping keep_prob = 0.5 (too low) weakens the model, so it underfits the data severely, giving an accuracy as low as 16%. You should iterate, gradually decreasing the keep_prob value from 1.0, until you find a suitable value.
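For example, a minimal sketch of that search, reusing the model() function defined above; the candidate keep_prob values and the fixed hyperparameters are just illustrative, borrowed from the settings in the first answer:

for kp in [1.0, 0.95, 0.9, 0.8, 0.7]:
    # start with no dropout and add more until test accuracy stops improving
    print("Training with train_keep_prob =", kp)
    parameters = model(X_train, Y_train, X_test, Y_test,
                       learning_rate=0.00002, lambd=0.03,
                       train_keep_prob=kp, num_epochs=1500,
                       minibatch_size=32, print_cost=False)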

Related

Tensorflow: How to predict a single image from the model trained?

I am new to tensorflow and I am trying to build an image classifier. I have successfully created the model and I am trying to predict a single image after restoring the model. I have gone through various tutorials (https://github.com/sankit1/cv-tricks.com/blob/master/Tensorflow-tutorials/tutorial-2-image-classifier/predict.py) but I can't figure out the feed_dict part in my code. I am stuck in the predict function after loading the saved model. Can someone please help me and tell me what to do after loading all the variables from the saved model?
This is the train function, which returns the parameters and saves them in a model.
def trainModel(train, test, learning_rate=0.0001, num_epochs=2, minibatch_size=32, graph_filename='costs'):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Input:
    train : training set
    test : test set
    learning_rate : learning rate
    num_epochs : number of epochs
    minibatch_size : size of minibatch
    print_cost : True to print the cost every epoch

    Returns:
    parameters : parameters learnt by the model
    """
    ops.reset_default_graph()  # for rerunning the model without resetting tf vars

    # input and output shapes
    (n_x, m) = train.images.T.shape
    n_y = train.labels.T.shape[0]

    costs = []  # var for storing the costs for later use

    # create placeholders
    X, Y = placeholderCreator(n_x, n_y)

    parameters = paramInitializer()

    # Forward propagation
    Z3 = forwardPropagation(X, parameters)

    # Cost function
    cost = costCalc(Z3, Y)

    # Backpropagation using adam optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    # Initialize tf variables
    init = tf.global_variables_initializer()
    minibatch_size = 32

    # Start session to compute Tensorflow graph
    with tf.Session() as sess:
        # Run initialization
        sess.run(init)

        for epoch in range(num_epochs):  # Training loop
            epoch_cost = 0.
            num_minibatches = int(m / minibatch_size)
            for i in range(num_minibatches):
                minibatch_X, minibatch_Y = train.next_batch(minibatch_size)  # Get next batch of training data and labels
                _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X.T, Y: minibatch_Y.T})  # Execute optimizer and cost function
                epoch_cost += minibatch_cost / num_minibatches  # Update epoch cost

        saver = tf.train.Saver()
        # Save parameters
        parameters = sess.run(parameters)
        saver.save(sess, "~/trained-model.ckpt")
        return parameters
And this is my predict function, where I am trying to predict an image. I have converted that image into MNIST format for ease of use (predicting_data). I load the model that I saved and use a softmax function on the output of the 3rd layer (the final output).
def predict():
    train = predicting_data.train
    (n_x, m) = train.images.T.shape
    n_y = train.labels.T.shape[0]
    X, Y = placeholderCreator(n_x, n_y)

    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph('~/trained-model.ckpt.meta')
        new_saver.restore(sess, '~/trained-model.ckpt')
        W1 = tf.get_default_graph().get_tensor_by_name('W1:0')
        b1 = tf.get_default_graph().get_tensor_by_name('b1:0')
        W2 = tf.get_default_graph().get_tensor_by_name('W2:0')
        b2 = tf.get_default_graph().get_tensor_by_name('b2:0')
        W3 = tf.get_default_graph().get_tensor_by_name('W3:0')
        b3 = tf.get_default_graph().get_tensor_by_name('b3:0')
        # forward propagation
        Z1 = tf.add(tf.matmul(W1, X), b1)
        A1 = tf.nn.relu(Z1)
        Z2 = tf.add(tf.matmul(W2, A1), b2)
        A2 = tf.nn.relu(Z2)
        Z3 = tf.add(tf.matmul(W3, A2), b3)
        y_pred = tf.nn.softmax(Z3)  ####what to do after this????
        cost = sess.run(y_pred, feed_dict={X: train.images.T})
Thank you in advance!
As vijay says in his comment:
Your predict part is not right; you need to get the input and prediction tensors from the saved graph using the get_tensor_by_name() function and then use them in your sess.run.
If you look at this post, it covers a similar problem and has some code examples.
In your code, you can pass 1 to the next_batch method to get just one image, as in the line below; a fuller restore-and-predict sketch follows it.
minibatch_X, minibatch_Y = train.next_batch(1)
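A minimal sketch of that restore-and-predict flow, assuming the input placeholder and the last linear layer were given names when the training graph was built; the tensor names 'X:0' and 'Z3:0' here are assumptions -- inspect tf.get_default_graph().get_operations() to find the real names in your checkpoint:

import tensorflow as tf

with tf.Session() as sess:
    # restore the graph structure and the trained weights
    saver = tf.train.import_meta_graph('~/trained-model.ckpt.meta')
    saver.restore(sess, '~/trained-model.ckpt')
    graph = tf.get_default_graph()
    # reuse the tensors that were already built during training
    X = graph.get_tensor_by_name('X:0')
    Z3 = graph.get_tensor_by_name('Z3:0')
    # one image in the same (features, examples) layout used for training
    image, _ = predicting_data.train.next_batch(1)
    pred_class = sess.run(tf.argmax(Z3, axis=0), feed_dict={X: image.T})
    print('Predicted class:', pred_class)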

Tensorflow reporting wrong AUC

I have an issue while using AUC from the tensorflow library. I train my model (a convolutional neural network) per batch (I do not use a validation set), and after each epoch I use an independent test set to obtain my evaluations. The problem lies within the AUC evaluation.
In each batch I calculate AUC/Accuracy/Loss/Precision/Recall/F1_score for the training set and then aggregate the mean of these scores. When I try to do the same for the test set, I again calculate the same scores. I notice that all scores except AUC have different values. I do not think it is correct for the test loss to increase while the AUC increases as well. And the problem is that the test AUC is almost identical to the training AUC (even though their accuracy and loss are completely different).
with tf.name_scope("output"):
    W = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev=0.1), name="W")
    b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
    scores = tf.nn.xw_plus_b(h_drop, W, b, name="scores")
    predictions = tf.argmax(scores, 1, name="predictions")
    l2_loss += tf.nn.l2_loss(W, name="l2_loss")
    l2_loss += tf.nn.l2_loss(b, name="l2_loss")
    tf.summary.histogram("l2", l2_loss)
    tf.summary.histogram("weigths", W)
    tf.summary.histogram("biases", b)

with tf.name_scope("auc_score"):
    # labelOut = tf.argmax(y_place_holder, 1)
    probability = tf.nn.softmax(scores)
    # auc_scoreTemp = streaming_auc(y_place_holder, probability, curve="PR")
    auc_scoreTemp = tf.metrics.auc(y_place_holder, probability, curve="PR")
    auc_score = tf.reduce_mean(tf.cast(auc_scoreTemp, tf.float32), name="auc_score")
    tf.summary.scalar("auc_score", auc_score)

with tf.name_scope("accuracy"):
    labelOut = tf.argmax(y_place_holder, 1)
    correct_prediction = tf.equal(predictions, tf.argmax(y_place_holder, 1), name="correct_prediction")
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy")
    tf.summary.scalar("accuracy", accuracy)

sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())

for batch in batches:
    x_batch, y_batch = list(zip(*batch))
    _, accuracy_train, auc_training, loss_train, prec_batch, recall_batch, f1_batch \
        = sess.run([train_step, accuracy, auc_score, cross_entropy, precision_mini_batch,
                    recall_mini_batch, f1_score_min_batch],
                   feed_dict={x_place_holder: x_batch,
                              y_place_holder: y_batch,
                              emb_place_holder: vocab_inv_emb_dset,
                              dropout_keep_prob: dropout_rate})
...

for test_batch in test_batches:
    auc_test = None
    x_test_batch, y_test_batch = list(zip(*test_batch))
    accuracy_test, loss_test, auc_test = sess.run([accuracy, cross_entropy, auc_score],
                                                  feed_dict={x_place_holder: x_test_batch,
                                                             y_place_holder: y_test_batch,
                                                             emb_place_holder: vocab_inv_emb_dset_val,
                                                             dropout_keep_prob: 1.0})
I also tried to use streaming_auc, which always returns 1.
EDIT
At the end of every epoch I reset the local variables by running:
sess.run(tf.local_variables_initializer())
But then the first batch outputs really bad results. After the first batch I get normal results from the test set, which are not close to the training results. I don't know if this is the correct way to do it, but the results seem more realistic this way.
All of the tf.metrics return a value and an updating op (see here). So as described here you want to use the updating op to accumulate values and then evaluate auc_score to retrieve the accumulated value, something like this:
...
auc_score, auc_op = tf.metrics.auc(y_place_holder, probability, curve="PR")
...
for batch in batches:
    sess.run([train_step, accuracy, auc_op, cross_entropy, ...])
...
py_auc = sess.run(auc_score)
EDIT -- toy example showing tf.metrics.auc and tf.contrib.metrics.streaming_auc
import tensorflow as tf
from tensorflow.contrib import metrics

batch_sz = 100
noise_mag = 0.5
nloop = 10

tf.set_random_seed(0)
batch_x = tf.random_uniform([batch_sz, 1], 0, 2, dtype=tf.int32)
noise = noise_mag * tf.random_normal([batch_sz, 1])
batch_y = tf.sigmoid(tf.to_float(batch_x) + noise)

auc_val, auc_accum = tf.metrics.auc(batch_x, batch_y)
# note: contrib.metrics.streaming_auc reverses labels, predictions
auc_val2, auc_accum2 = metrics.streaming_auc(batch_y, batch_x)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    for i in range(nloop):
        _ = sess.run([auc_accum, auc_accum2])
    auc, auc2 = sess.run([auc_val, auc_val2])
    print('Accumulated AUC = ', auc)    # 0.9238014
    print('Accumulated AUC2 = ', auc2)  # 0.9238014
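One more detail implied by the question's edit: tf.metrics keeps its running totals in local variables, so to get a fresh test AUC each epoch you can reinitialize just those accumulators between the training and test passes, rather than every local variable. A minimal sketch against the toy graph above; the scope name 'auc' is an assumption about where tf.metrics.auc placed its variables:

# collect the accumulator variables created by tf.metrics.auc
auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='auc')
reset_auc = tf.variables_initializer(auc_vars)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    for i in range(nloop):
        sess.run(auc_accum)       # accumulate over "training" batches
    train_auc = sess.run(auc_val)
    sess.run(reset_auc)           # start the test AUC from scratch
    for i in range(nloop):
        sess.run(auc_accum)       # accumulate over "test" batches
    test_auc = sess.run(auc_val)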

Tensorflow loss minimization is increasing loss

I implemented the linear regression model shown on Tensorflow's main page: https://www.tensorflow.org/get_started/get_started
import numpy as np
import tensorflow as tf
# Model parameters
W = tf.Variable([.3], tf.float32)
b = tf.Variable([-.3], tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [1,2,3,4]
y_train = [0,-1,-2,-3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init) # reset values to wrong
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})
# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x:x_train, y:y_train})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
However, when I change the training data to x_train=[2,4,6,8] and y_train=[3,4,5,6], the loss starts to increase over time until it reaches 'nan'.
As suggested by Steven, you should probably use reduce_mean(), which seems to fix the problem of the increasing loss function. Note that I also increased the number of training steps since reduce_mean() appears to need a bit longer to converge. Be careful with increasing the learning rate, since this may reproduce the problem. Instead, if training time is not a critical factor, you might want to decrease the learning rate and increase the number of training iterations further.
With the reduce_sum() function it worked well for me after decreasing the learning rate from 0.01 to 0.001. Again, thanks to Steven for the suggestion.
import numpy as np
import tensorflow as tf
# Model parameters
W = tf.Variable([.3], tf.float32)
b = tf.Variable([-.3], tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_mean(tf.square(linear_model - y)) # mean of the squares
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [2,4,6,8]
y_train = [0,3,4,5]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init) # reset values to wrong
for i in range(5000):
    sess.run(train, {x: x_train, y: y_train})
# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x:x_train, y:y_train})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))

Can i generate the input given the output in a pretrained Tensorflow model?

Let's assume I have trained a model for the MNIST task, given the following code:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1

# Network Parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 256  # 2nd layer number of features
n_input = 784     # MNIST data input (img shape: 28*28)
n_classes = 10    # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        avg_acc = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
            batch_acc = accuracy.eval({x: batch_x, y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
            avg_acc += batch_acc / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            test_acc = accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
            print(
                "Epoch:",
                '%04d' % (epoch+1),
                "cost=",
                "{:.9f}".format(avg_cost),
                "average_train_accuracy=",
                "{:.6f}".format(avg_acc),
                "test_accuracy=",
                "{:.6f}".format(test_acc)
            )
    print("Optimization Finished!")
So this model predicts the number shown in an image, given the image.
Once I have trained it, could I make the input a 'variable' instead of a 'placeholder' and try to reverse engineer the input given an output?
For example, I would like to feed the output '8' and produce a representative image of the number eight.
I thought of:
Freezing the model
Adding a variable matrix 'M' of the same size as the input between the input and the weights
Feeding an identity matrix as input to the input placeholder
Running the optimizer to learn the 'M' matrix.
Is there a better way?
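A minimal sketch of that idea (my reading of it, not a definitive implementation): rather than inserting an extra matrix M, you can make the image itself a tf.Variable, keep the trained weights frozen via var_list, and minimize the classification loss for the desired label. The optimizer, step count and clipping below are assumptions:

# generate an input that the (already trained) network classifies as "8"
target = tf.one_hot([8], depth=n_classes)
gen_img = tf.Variable(tf.zeros([1, n_input]))          # the "input" being optimized
gen_logits = multilayer_perceptron(gen_img, weights, biases)
gen_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=gen_logits, labels=target))
# var_list=[gen_img] leaves the trained weights and biases untouched
gen_step = tf.train.AdamOptimizer(0.05).minimize(gen_loss, var_list=[gen_img])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # in practice, restore the trained weights here instead of re-initializing them
    for _ in range(1000):
        sess.run(gen_step)
    generated = sess.run(tf.clip_by_value(gen_img, 0., 1.)).reshape(28, 28)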
If your goal is to reverse the model in the sense that the input should be a digit and the output an image displaying that digit (in, say, handwritten form), it is not quite possible to do with machine learning models.
Because machine learning models attempt to create generalizations from the input (so that similar input will provide similar output, even though the model was never trained on it), they tend to be quite lossy. Additionally, the reduction from hundreds, thousands or more input variables into a single output variable obviously has to lose some information in the process.
More specifically, although a Multilayer Perceptron (as you're using in your example) is a fully connected Neural Network, some weights are expected to be zero, thus completely dropping the information in certain input variables. Moreover, the same output of a neuron can be produced by multiple distinct input values to its function, due to the many degrees of freedom.
It is theoretically possible to replace those degrees of freedom and lost information with specifically crafted or random data, but that does not guarantee a successful output.
On a side note, I'm a bit puzzled by this question. If you are able to generate that model yourself, you could also create a similar model that does the opposite: you could train a model to accept an input digit (and perhaps some random seed) and output an image, as sketched below.
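For what it's worth, a rough sketch of such an "opposite" model (entirely an assumption about how one might wire it; sizes and architecture are arbitrary): a small network that maps a one-hot digit plus a random seed to a 784-pixel image and is trained to reconstruct real MNIST images carrying that label.

import tensorflow as tf

label_in = tf.placeholder(tf.float32, [None, 10])       # one-hot digit
seed_in = tf.placeholder(tf.float32, [None, 16])        # random seed
image_target = tf.placeholder(tf.float32, [None, 784])  # real image with that label

hidden = tf.layers.dense(tf.concat([label_in, seed_in], axis=1), 256, tf.nn.relu)
image_out = tf.layers.dense(hidden, 784, tf.nn.sigmoid)

recon_loss = tf.reduce_mean(tf.square(image_out - image_target))
train_op = tf.train.AdamOptimizer(0.001).minimize(recon_loss)

Training would loop over mnist.train.next_batch, feeding the batch labels, random seeds and images; at inference time you feed a one-hot '8' plus fresh noise and read image_out.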

Tensor Flow - low accuracy on CNN Mnist Data set / How to batch accuracy calculations

The code is in Python 3.5.2 with TensorFlow. The neural network returns an accuracy between .10 and 5.00, with the higher value tending to be the training-set accuracy, roughly a factor of 6 larger than the test value. I cannot tell whether the neural network is legitimately doing worse than random guessing or whether the accuracy code I am using has a serious fault I cannot see.
The neural network consists of 5 layers:
input
conv1 (with max pooling relu and dropout)
conv2 (with max pooling relu and dropout)
fully connected (with relu)
output
uses default Adam optimizer
I feel very suspicious of my accuracy calculations, as I made them differently from what I have seen, due to RAM constraints. The accuracy calculation covers both the train and the test data.
acc_total = 0
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
for _ in range(int(mnist.test.num_examples/batch_size)):
    test_x, test_y = mnist.test.next_batch(batch_size)
    acc = accuracy.eval(feed_dict={x: test_x, y: test_y})
    acc_total += acc
    print('Accuracy:', acc_total*batch_size/float(mnist.test.num_examples), end='\r')
print('Epoch', epoch, 'current test set accuracy : ', acc_total*batch_size/float(mnist.test.num_examples))

acc_total = 0
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
for _ in range(int(mnist.train.num_examples/batch_size)):
    train_x, train_y = mnist.train.next_batch(batch_size)
    acc = accuracy.eval(feed_dict={x: train_x, y: train_y})
    acc_total += acc
    print('Accuracy:', acc_total*batch_size/float(mnist.train.num_examples), end='\r')
print('Epoch', epoch, 'current train set accuracy : ', acc_total*batch_size/float(mnist.test.num_examples))
This is a sample of the outputs:
Epoch 0 completed out of 20 loss: 10333239.3396 83.29 ts 429
Epoch 0 current test set accuracy : 0.7072
Epoch 0 current train set accuracy : 3.8039
Epoch 1 completed out of 20 loss: 1831489.40747 39.24 ts 858
Epoch 1 current test set accuracy : 0.7765
Epoch 1 current train set accuracy : 4.2239
Epoch 2 completed out of 20 loss: 1010191.40466 25.89 ts 1287
Epoch 2 current test set accuracy : 0.8069
Epoch 2 current train set accuracy : 4.3898
Epoch 3 completed out of 20 loss: 631960.809082 0.267 ts 1716
Epoch 3 current test set accuracy : 0.8277
Epoch 3 current train set accuracy : 4.4955
Epoch 4 completed out of 20 loss: 439149.724823 2.001 ts 2145
Epoch 4 current test set accuracy : 0.8374
Epoch 4 current train set accuracy : 4.5674
The full code is as follows (sorry about the length; I added a lot of comments for my own use):
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

#Imported data set
mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)

#amount of output classes
n_classes = 10
#amount of examples processed at once
#memory impact of ~500MB for 128 with more on eval runs
batch_size = 128
#times to cycle through the entire input data set
epoch_amm = 20

#Input and output placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32)

#Dropout is 1-keeprate; fc - fully connected layer dropout; conv - conv layer dropout
keep_rate_fc = .5
keep_rate_conv = .75
keep_prob = tf.placeholder(tf.float32)

#Regularization parameters
Regularization_active = False #True and False MUST be capitalized
Lambda = 1.0 #'weight' of the weights on the loss function

#counter for total steps taken by trainer
training_steps = 1

#Learning rate for the network
base_Rate = .03
decay_steps = 64
decay_rate = .96
Staircase = True
Learning_Rate = tf.train.exponential_decay(base_Rate, training_steps, decay_steps, decay_rate, staircase='Staircase', name='Exp_decay')

#Convolution function returns neurons that act on a section of the prev. layer
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')

#Pooling function returns the max value in 2 by 2 sections
def maxpool2d(x):
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

def relu(x):
    return tf.nn.relu(x, 'relu')

def add(x, b):
    return tf.add(x, b)
#'Main' method, contains the Neural Network
def convolutional_neural_network(x):
    weights = {'W_conv1': tf.Variable(tf.random_normal([5,5,1,32])),
               'W_conv2': tf.Variable(tf.random_normal([5,5,32,64])),
               'W_fc': tf.Variable(tf.random_normal([7*7*64,1024])),
               'W_out': tf.Variable(tf.random_normal([1024,n_classes]))}
    biases = {'B_conv1': tf.Variable(tf.random_normal([32])),
              'B_conv2': tf.Variable(tf.random_normal([64])),
              'B_fc': tf.Variable(tf.random_normal([1024])),
              'B_out': tf.Variable(tf.random_normal([n_classes]))}
    # Input layer
    x = tf.reshape(x, shape=[-1,28,28,1])
    #first layer: pass inputs through conv2d and save as conv1, then apply maxpool2d
    conv1 = conv2d(x, weights['W_conv1'])
    conv1 = add(conv1, biases['B_conv1'])
    conv1 = relu(conv1)
    conv1 = maxpool2d(conv1)
    conv1 = tf.nn.dropout(conv1, keep_rate_conv)
    #second layer does same as first layer
    conv2 = conv2d(conv1, weights['W_conv2'])
    conv2 = add(conv2, biases['B_conv2'])
    conv2 = relu(conv2)
    conv2 = maxpool2d(conv2)
    conv2 = tf.nn.dropout(conv2, keep_rate_conv)
    #3rd layer fully connected
    fc = tf.reshape(conv2, [-1,7*7*64])
    fc = tf.matmul(fc, weights['W_fc'])
    fc = add(fc, biases['B_fc'])
    fc = relu(fc)
    fc = tf.nn.dropout(fc, keep_rate_fc)
    #4th and final layer
    output = tf.matmul(fc, weights['W_out'])
    output = add(output, biases['B_out'])
    return output
#Trains The neural Network
def train_neural_network(x):
    training_steps = 0
    #Initiate The Network
    prediction = convolutional_neural_network(x)
    #Define the Cost and Cost function
    #tf.reduce_mean averages the values of a tensor into one value
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
    #Apply Regularization if active
    #if Regularization_active :
    #    print('DEBUG!! LINE 84 REGULARIZATION ACTIVE')
    #    cost = (tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y))+
    #            (Lambda*(tf.nn.l2_loss(weight['W_conv1'])+
    #                     tf.nn.l2_loss(weight['W_conv2'])+
    #                     tf.nn.l2_loss(weight['W_fc'])+
    #                     tf.nn.l2_loss(weight['W_out'])+
    #                     tf.nn.l2_loss(biases['B_conv1'])+
    #                     tf.nn.l2_loss(biases['B_conv2'])+
    #                     tf.nn.l2_loss(biases['B_fc'])+
    #                     tf.nn.l2_loss(biases['B_out']))))
    #Optimizer + Learning_Rate passthrough
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    #Get Epoch Amount
    hm_epochs = epoch_amm
    #Starts C++ Training session
    print('Session Started')
    with tf.Session() as sess:
        #Initiate all Variables
        sess.run(tf.global_variables_initializer())
        #Begin Logs
        summary_writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
        #Start Training
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for count in range(int(mnist.train.num_examples/batch_size)):
                training_steps = (training_steps + 1)
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                count, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
                print('Epoch', epoch, 'current epoch loss', epoch_loss, 'batch loss', c, 'ts', training_steps, ' ', end='\r')
            #Log the loss per epoch
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss, ' ')

            acc_total = 0
            correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            for _ in range(int(mnist.test.num_examples/batch_size)):
                test_x, test_y = mnist.test.next_batch(batch_size)
                acc = accuracy.eval(feed_dict={x: test_x, y: test_y})
                acc_total += acc
                print('Accuracy:', acc_total*batch_size/float(mnist.test.num_examples), end='\r')
            print('Epoch', epoch, 'current test set accuracy : ', acc_total*batch_size/float(mnist.test.num_examples))

            acc_total = 0
            correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            for _ in range(int(mnist.train.num_examples/batch_size)):
                train_x, train_y = mnist.train.next_batch(batch_size)
                acc = accuracy.eval(feed_dict={x: train_x, y: train_y})
                acc_total += acc
                print('Accuracy:', acc_total*batch_size/float(mnist.train.num_examples), end='\r')
            print('Epoch', epoch, 'current train set accuracy : ', acc_total*batch_size/float(mnist.test.num_examples))
        print('Complete')
        sess.close()

#Run the Neural Network
train_neural_network(x)
The CNN had low results for 4 reasons:
Improper (lack of) feeding of dropout
- the keep rate was not being fed into accuracy.eval(feed_dict={x: test_x, y: test_y}), causing the network to underperform in its accuracy evaluations (a sketch of this fix follows below)
Poor initialization of weights
- ReLU neurons work significantly better with weights closer to zero than a plain normal distribution
Far too high learning rate
- a learning rate of .03, even with decay, was far too high and stopped the network from training effectively
Errors in the accuracy function
- the accuracy function for the training data was receiving the size of the data set from mnist.test.num_examples instead of the proper mnist.train.num_examples, which caused nonsensical accuracy values in excess of 100%
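A minimal sketch of the first two fixes (the stddev and keep rates are illustrative): dropout should be driven by the keep_prob placeholder that the question's code already defines but never uses, so training and evaluation can feed different rates, and ReLU-layer weights should start close to zero.

import tensorflow as tf

x_in = tf.placeholder(tf.float32, [None, 1024])
keep_prob = tf.placeholder(tf.float32)

w = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))  # small stddev for ReLU nets
layer = tf.nn.relu(tf.matmul(x_in, w))
layer = tf.nn.dropout(layer, keep_prob)  # rate comes from the placeholder, not a constant

# during training:   sess.run(train_op, feed_dict={..., keep_prob: 0.5})
# during evaluation: accuracy.eval(feed_dict={..., keep_prob: 1.0})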