Implement TFLearn IMDB LSTM example in TensorFlow

I'm reimplementing TFLearn's LSTM IMDB example in plain TensorFlow.
I used the same dataset, architecture and hyper-parameters (embedding size, max sentence length, and so on) as the TFLearn model does, but my model performs worse than the TFLearn example (after 10 epochs my model gets about 52% accuracy while the example gets near 80%).
I'd appreciate any advice on how to reach the example's performance.
Below is my code:
import tensorflow as tf
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb
from tensorflow.contrib.rnn import BasicLSTMCell
import time
n_class = 2
n_words = 10000
EMBEDDING_SIZE = 128
HIDDEN_SIZE = 128
MAX_LENGTH = 100
lr = 1e-3
epoch = 10
TRAIN_SIZE = 22500
validation_size = 2500
batch_size = 128
KP = 0.8
# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=n_words,
                                valid_portion=0.1, sort_by_len=False)
trainX, trainY = train
validationX, validationY = test
testX, testY = _
# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=MAX_LENGTH, value=0.)
validationX = pad_sequences(validationX, maxlen=MAX_LENGTH, value=0.)
testX = pad_sequences(testX, maxlen=MAX_LENGTH, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, n_class)
validationY = to_categorical(validationY, n_class)
testY = to_categorical(testY, n_class)
graph = tf.Graph()
with graph.as_default():
    # input
    text = tf.placeholder(tf.int32, [None, MAX_LENGTH])
    labels = tf.placeholder(tf.float32, [None, n_class])
    keep_prob = tf.placeholder(tf.float32)
    embeddings_var = tf.Variable(tf.truncated_normal([n_words, EMBEDDING_SIZE]), trainable=True)
    text_embedded = tf.nn.embedding_lookup(embeddings_var, text)
    print(text_embedded.shape)  # [batch_size, length, embedding_size]
    word_list = tf.unstack(text_embedded, axis=1)
    cell = BasicLSTMCell(HIDDEN_SIZE)
    dropout_cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob, output_keep_prob=keep_prob)
    outputs, encoding = tf.nn.static_rnn(dropout_cell, word_list, dtype=tf.float32)
    logits = tf.layers.dense(outputs[-1], n_class, activation=None)
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    optimizer = tf.train.AdamOptimizer(lr).minimize(loss)
    prediction = tf.argmax(logits, 1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.argmax(labels, 1)), tf.float32))
train_steps = epoch * TRAIN_SIZE // batch_size + 1
print("Train steps: ", train_steps)
with tf.Session(graph=graph) as sess:
    tf.global_variables_initializer().run()
    print("Initialized!")
    s = time.time()
    offset = 0
    for step in range(train_steps):
        # cycle the offset through the training set one batch per step
        offset = (step * batch_size) % (TRAIN_SIZE - batch_size)
        batch_text = trainX[offset: offset + batch_size, :]
        batch_label = trainY[offset: offset + batch_size, :]
        fd = {text: batch_text, labels: batch_label, keep_prob: KP}
        _, l, acc = sess.run([optimizer, loss, accuracy], feed_dict=fd)
        if step % 100 == 0:
            print("Step: %d loss: %f accuracy: %f" % (step, l, acc))
        if step % 500 == 0:
            v_l, v_acc = sess.run([loss, accuracy], feed_dict={
                text: validationX,
                labels: validationY,
                keep_prob: 1.0
            })
            print("------------------------------------------------")
            print("Validation: step: %d loss: %f accuracy: %f" % (step, v_l, v_acc))
            print("------------------------------------------------")
    print("Training finished, time consumed:", time.time() - s, " s")
    print("Test accuracy: %f" % accuracy.eval(feed_dict={
        text: testX,
        labels: testY,
        keep_prob: 1.0
    }))

Sorry, I made a silly mistake!
The loss
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
is supposed to be
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
Then the accuracy matches the TFLearn example.
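For anyone wondering why this matters: sigmoid_cross_entropy_with_logits treats each of the two output units as an independent binary label, while softmax_cross_entropy_with_logits normalizes across the two logits so the classes compete, which is what one-hot, mutually exclusive labels need. A minimal sketch of the corrected loss with the variable names used above (the sparse variant in the comment is an extra suggestion and assumes integer class ids rather than one-hot vectors):
# the two classes are mutually exclusive and the labels are one-hot [batch, 2],
# so softmax cross-entropy over the 2 logits is the appropriate loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# equivalent, if the labels were kept as integer class ids instead of one-hot vectors:
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
#     logits=logits, labels=tf.argmax(labels, 1)))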

Related

Network bug - Inception v1 isn't training

I am trying to use the Inception model (GoogLeNet) from this link https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v1.py, which Google implemented with the TensorFlow Slim API, to classify images from the CIFAR-10 dataset (dataset link https://www.cs.toronto.edu/~kriz/cifar.html). The problem is that the network cost stays almost constant and I can't find the bug. I am very new to TensorFlow and Slim, so I would really appreciate any help.
I am using these packages:
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import ops
import matplotlib.pyplot as plt
import os
import pickle
import cv2
from sklearn import model_selection as ms
from nets import inception_v1,inception_utils
import math
%matplotlib inline
And I made these two functions:
def one_hot_matrix(labels, C):
    C = tf.constant(C, name='C')
    one_hot_matrix = tf.one_hot(labels, C, axis=0)
    sess = tf.Session()
    one_hot = sess.run(one_hot_matrix)
    sess.close()
    return one_hot
def make_mini_batches(X, Y, mini_batch_size):
    m = X.shape[0]
    mini_batches = []
    # number of mini batches of size mini_batch_size in the dataset
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = X[k*mini_batch_size : (k+1)*mini_batch_size, ...]
        mini_batch_Y = Y[k*mini_batch_size : (k+1)*mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = X[num_complete_minibatches*mini_batch_size:, ...]
        mini_batch_Y = Y[num_complete_minibatches*mini_batch_size:, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    return mini_batches
First, I am reading the dataset:
# function to read the batches
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
        # note the encoding type is 'latin1'
        batch = pickle.load(file, encoding='latin1')
    features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']
    datadict = {'data': features, 'labels': labels}
    return datadict
# combine batches into one dataset (batch size: 10000)
full_data = load_cfar10_batch('./cifar_10',1)['data']
full_labels = []
for i in range(5):
    full_labels.extend(load_cfar10_batch('./cifar_10', i+1)['labels'])
    if i > 0:
        full_data = np.concatenate((full_data, load_cfar10_batch('./cifar_10', i+1)['data']), axis=0)
# dataset sizes
full_data.shape, len(full_labels)
Followed by some preprocessing and train/validation split:
# data preprocessing (using only 1/10 of the dataset for speed)
X = full_data[0:5000]
y = one_hot_matrix(full_labels[0:5000], 10).T
# split into training-validation sets
x_train, x_val, y_train, y_val = ms.train_test_split(X, y, test_size=0.2, random_state=1)
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_train = x_train / 255.0
x_val = x_val / 255.0
print('x_train shape:',x_train.shape)
print('y_train shape:',y_train.shape)
print('x_val shape:',x_val.shape)
print('y_val shape:',y_val.shape)
Then I initialize the variables:
tf.set_random_seed(1)
seed = 3
(m, n_H, n_W, n_C) = x_train.shape
n_y = y_train.shape[1]
costs = []
print_cost = True
learning_rate = 0.001
num_epochs = 100
minibatch_size = 256
num_minibatches = int(m / minibatch_size)
minibatches = make_mini_batches(x_train, y_train, minibatch_size)
ops.reset_default_graph()
inputs = tf.placeholder(tf.float32,shape=[None, n_H, n_W, n_C],name = 'inputs')
labels = tf.placeholder(tf.int8,shape=[None, n_y],name = 'labels')
# Forward propagation (Inception)
Z = inception_v1.inception_v1(inputs,num_classes = n_y,dropout_keep_prob=1,global_pool=True)[1]['Logits']
# Cost function
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = Z, labels = labels))
# ADAM optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Initialize variables
init = tf.global_variables_initializer()
And then the training loop:
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epochs):
        # learning rate decay
        if epoch % 8 == 0:
            learning_rate *= math.pow(0.95, epoch/8)
        minibatch_cost = 0.
        for minibatch in minibatches:
            (minibatch_X, minibatch_Y) = minibatch
            _, temp_cost = sess.run([optimizer, cost], feed_dict={inputs: minibatch_X, labels: minibatch_Y})
            minibatch_cost += temp_cost / num_minibatches
        # Print the cost every epoch
        if print_cost == True and epoch % 5 == 0:
            print("Cost after epoch %i: %f" % (epoch, minibatch_cost), ", Learning rate: %f" % (learning_rate))
        if print_cost == True and epoch % 1 == 0:
            costs.append(minibatch_cost)
    # Plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('Cost')
    plt.xlabel('Iterations (per tens)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    # Calculate the correct predictions
    predict_op = tf.argmax(Z, 1)
    correct_prediction = tf.equal(predict_op, tf.argmax(labels, 1))
    # Calculate accuracy on the validation set
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    #print(accuracy)
    train_accuracy = accuracy.eval({inputs: x_train, labels: y_train})
    val_accuracy = accuracy.eval({inputs: x_val, labels: y_val})
    print("Train Accuracy:", train_accuracy)
    print("Validation Accuracy:", val_accuracy)
The output looks like this:
Cost after epoch 0: 2.455999 , Learning rate: 0.001000
Cost after epoch 5: 2.454697 , Learning rate: 0.001000
Cost after epoch 10: 2.454670 , Learning rate: 0.000950
Cost after epoch 15: 2.454655 , Learning rate: 0.000950
Cost after epoch 20: 2.454650 , Learning rate: 0.000857
Cost after epoch 25: 2.454649 , Learning rate: 0.000735
Cost after epoch 30: 2.454659 , Learning rate: 0.000735
Cost after epoch 35: 2.454643 , Learning rate: 0.000599
Cost after epoch 40: 2.454627 , Learning rate: 0.000463
...
...
And so my network is not training.
I managed to find the solution: I had to put the call inside the argument scope of Inception before calling it, something like this:
import tensorflow.contrib.slim as slim

with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
    Z = inception_v1.inception_v1(inputs, num_classes=n_y, dropout_keep_prob=1, global_pool=True)[1]['Logits']
After that, everything works just fine.
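As a sketch of the same call with the end points unpacked explicitly (the is_training flag here is an assumption about the slim nets signature, not something from the original fix):
with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
    # the arg_scope supplies the weight initializers/regularizers and the
    # batch-norm defaults that the slim conv layers inside Inception expect
    logits, end_points = inception_v1.inception_v1(
        inputs, num_classes=n_y, is_training=True,
        dropout_keep_prob=1.0, global_pool=True)
Z = end_points['Logits']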

Training/Test data in tensorflow example

I'm new to TensorFlow and I am following the example here, but I have one question.
The code is as follows:
import numpy as np
import tensorflow as tf
from time import time
import math
from include.data import get_data_set
from include.model import model, lr
train_x, train_y = get_data_set("train")
test_x, test_y = get_data_set("test")
x, y, output, y_pred_cls, global_step, learning_rate = model()
global_accuracy = 0
# PARAMS
_BATCH_SIZE = 128
_EPOCH = 60
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"
# LOSS AND OPTIMIZER
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(loss, global_step=global_step)
# PREDICTION AND ACCURACY CALCULATION
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# SAVER
merged = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)
try:
    print("\nTrying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except ValueError:
    print("\nFailed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())
def train(epoch):
    batch_size = int(math.ceil(len(train_x) / _BATCH_SIZE))
    i_global = 0
    for s in range(batch_size):
        batch_xs = train_x[s*_BATCH_SIZE: (s+1)*_BATCH_SIZE]
        batch_ys = train_y[s*_BATCH_SIZE: (s+1)*_BATCH_SIZE]
        start_time = time()
        i_global, _, batch_loss, batch_acc = sess.run(
            [global_step, optimizer, loss, accuracy],
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
        duration = time() - start_time
        if s % 10 == 0:
            percentage = int(round((s/batch_size)*100))
            bar_len = 29
            filled_len = int((bar_len*int(percentage))/100)
            bar = '=' * filled_len + '>' + '-' * (bar_len - filled_len)
            msg = "Global step: {:>5} - [{}] {:>3}% - acc: {:.4f} - loss: {:.4f} - {:.1f} sample/sec"
            print(msg.format(i_global, bar, percentage, batch_acc, batch_loss, _BATCH_SIZE / duration))
    test_and_save(i_global, epoch)
def test_and_save(_global_step, epoch):
    global global_accuracy
    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=np.int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(
            y_pred_cls,
            feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)}
        )
        i = j
    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean()*100
    correct_numbers = correct.sum()
    mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{})"
    print(mes.format((epoch+1), acc, correct_numbers, len(test_x)))
    if global_accuracy != 0 and global_accuracy < acc:
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
        ])
        train_writer.add_summary(summary, _global_step)
        saver.save(sess, save_path=_SAVE_PATH, global_step=_global_step)
        mes = "This epoch receive better accuracy: {:.2f} > {:.2f}. Saving session..."
        print(mes.format(acc, global_accuracy))
        global_accuracy = acc
    elif global_accuracy == 0:
        global_accuracy = acc
    print("###########################################################################################################")
def main():
    for i in range(_EPOCH):
        print("\nEpoch: {0}/{1}\n".format((i+1), _EPOCH))
        train(i)

if __name__ == "__main__":
    main()

sess.close()
In this example, I think both the test and the training data are fed to the network, whereas normally only the training data should be used for training. I cannot see any real difference between the train() and test_and_save() functions. Am I wrong? Thanks
Here is an explanation, if I understood your question correctly. The train function is called every epoch and iterates through the training data. At the end of the epoch the test_and_save function is called, where the model accuracy is evaluated: it runs the test data through the network with the learned weights, calculates the accuracy, and saves the model. This is repeated _EPOCH times.
Edit: The model is saved in the test_and_save function. However, the weights are only updated (gradients calculated) when the optimizer is included in the sess.run() call in the train function. In test_and_save the test data is fed to the network, but only y_pred_cls is evaluated through sess.run(), so no gradient step is taken on the test data.
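To make that concrete, here is a small self-contained TF 1.x sketch (a toy graph, not code from the example): fetching only a forward-pass tensor leaves the variables untouched, while fetching the optimizer op applies a gradient step.
import numpy as np
import tensorflow as tf

# toy graph: one trainable weight, a squared-error loss, and an optimizer
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
w = tf.Variable([[0.0]])
pred = tf.matmul(x, w)
loss = tf.reduce_mean(tf.square(pred - y))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {x: np.ones((4, 1)), y: np.ones((4, 1))}
    sess.run(pred, feed_dict=feed)              # evaluation only: w stays [[0.]]
    print(sess.run(w))
    sess.run([train_op, loss], feed_dict=feed)  # optimizer fetched: a gradient step runs
    print(sess.run(w))                          # w has moved away from 0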

TensorFlow: why does my logistic cost not change?

I am new to TensorFlow and I want to do a classification job with self-defined layers. I have built a model that concatenates the outputs of several previous DNN layers into one big tensor and then does logistic binary classification, like this (but without the RNN):
Model Structure
This is my code:
import tensorflow as tf
import numpy as np
import pandas as pd
from load_data import load_data_from_file
TRAIN_FILE = 'train.csv'
TEST_FILE = 'test.csv'
X_train, y_train = load_data_from_file(TRAIN_FILE)
y_train = y_train[np.newaxis]
y_train = y_train.transpose()
n_x_dnn = X_train.shape[1]
n_hidden_dnn = 50
n_merge_dnn1 = 5 * n_hidden_dnn
n_dnn6 = 50
hm_epochs = 10
batch_size = 50
X = tf.placeholder(tf.float32, [None, n_x_dnn])
y = tf.placeholder(tf.float32, [None, 1])
weights = {
    'dnn1': tf.Variable(tf.random_normal([n_x_dnn, n_hidden_dnn])),
    'dnn2': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
    'dnn3': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
    'dnn4': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
    'dnn5': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
    'dnn_merge': tf.Variable(tf.random_normal([n_merge_dnn1, n_dnn6])),
    'dnn6': tf.Variable(tf.random_normal([n_dnn6, 1]))
}
biases = {
    'dnn1_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
    'dnn2_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
    'dnn3_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
    'dnn4_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
    'dnn5_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
    'dnn_merge_b': tf.Variable(tf.random_normal([n_dnn6])),
    'dnn6_b': tf.Variable(tf.random_normal([1])),
}
def define_layers():
    # dnn layer 1
    dnn_layer1 = tf.add(tf.matmul(X, weights['dnn1']), biases['dnn1_b'])
    dnn_layer1 = tf.nn.relu(dnn_layer1)
    # dnn layer 2
    dnn_layer2 = tf.add(tf.matmul(dnn_layer1, weights['dnn2']), biases['dnn2_b'])
    dnn_layer2 = tf.nn.relu(dnn_layer2)
    # dnn layer 3
    dnn_layer3 = tf.add(tf.matmul(dnn_layer2, weights['dnn3']), biases['dnn3_b'])
    dnn_layer3 = tf.nn.relu(dnn_layer3)
    # dnn layer 4
    dnn_layer4 = tf.add(tf.matmul(dnn_layer3, weights['dnn4']), biases['dnn4_b'])
    dnn_layer4 = tf.nn.relu(dnn_layer4)
    # dnn layer 5
    dnn_layer5 = tf.add(tf.matmul(dnn_layer4, weights['dnn5']), biases['dnn5_b'])
    dnn_layer5 = tf.nn.relu(dnn_layer5)
    # merge layer: concat the five hidden layer outputs into one big tensor
    merged = tf.concat([dnn_layer1, dnn_layer2, dnn_layer3, dnn_layer4, dnn_layer5], 1)
    dnn_merge = tf.add(tf.matmul(merged, weights['dnn_merge']), biases['dnn_merge_b'])
    dnn_merge = tf.nn.relu(dnn_merge)
    # dnn layer 6
    dnn_layer6 = tf.add(tf.matmul(dnn_merge, weights['dnn6']), biases['dnn6_b'])
    dnn_layer6 = tf.nn.sigmoid(dnn_layer6)
    return dnn_layer6
logits = define_layers()
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(hm_epochs):
        # epoch_loss = 0
        i = 0
        while i < len(X_train):
            start = i
            end = i + batch_size
            batch_x = np.array(X_train[start:end])
            batch_y = np.array(y_train[start:end])
            _, c, l = sess.run([optimizer, cost, logits], feed_dict={X: batch_x,
                                                                     y: batch_y})
            # epoch_loss += c
            i += batch_size
            # print(c, l)
        print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', c, 'logits:', l)
    save_path = saver.save(sess, "model/NF_dnn_and_rnn_model.ckpt")
    print("Model saved in file: %s" % save_path)
    X_test, y_test = load_data_from_file(TEST_FILE)
    predict = sess.run(logits, feed_dict={X: X_test})
    print(predict)
And the output shows that my cost does not change:
Epoch 7 completed out of 10 loss: 1.27325 logits: [[ 1.]
Epoch 8 completed out of 10 loss: 1.27325 logits: [[ 1.]
Epoch 9 completed out of 10 loss: 1.27325 logits: [[ 1.]
You should remove the sigmoid activation from your last layer dnn_layer6, because sigmoid_cross_entropy_with_logits already applies the sigmoid internally and expects raw logits.
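A minimal sketch of the corrected graph-level code, keeping your variable names (the separate probs tensor for inference is an extra suggestion, not part of your original code):
logits = define_layers()  # define_layers now returns dnn_layer6 without the tf.nn.sigmoid
# sigmoid_cross_entropy_with_logits applies the sigmoid internally,
# so it must be fed the raw logits
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# for predictions at inference time, apply the sigmoid explicitly
probs = tf.nn.sigmoid(logits)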

TensorFlow neural network has high error even on a really easy dataset

I'm trying to implement a 1-hidden-layer NN for a regression problem. The loss improves for a few iterations, then it gets stuck at a really high error even on very easy data. Could someone help me find the bug? Here is my code:
import tensorflow as tf
import scipy.io as sio
import numpy as np
reuse_weights = 1
n_nodes_hl1 = 10
batch_size = 200
hm_epochs = 20
# load input from matlab
input_training = sio.loadmat('xMat.mat')
input_training = input_training['xMat']
input_test = sio.loadmat('xMat.mat')
input_test = input_test['xMat']
# find number of measurements and input length
n_measurements = input_training.shape[0]
input_length = input_training.shape[1]
# current input
data_y = input_training[:, input_length - 1].astype(float)
data_x = input_training[:, 0 : input_length - 1].astype(float)
test_data_y = input_test[:, input_length - 1].astype(float)
test_data_x = input_test[:, 0 : input_length - 1].astype(float)
x = tf.placeholder('float32',[None, input_length - 1])
y = tf.placeholder('float32')
# place holder for Dropout algorithm drop probability
keep_prob = tf.placeholder('float32')
def next_batch(data):
    """
    Return a total of `batch_size` samples from the array `data`.
    """
    if len(data.shape) == 2:
        idx = np.arange(0, len(data[:, 0]))  # get all possible indexes
    else:
        idx = np.arange(0, len(data))  # get all possible indexes
    np.random.shuffle(idx)  # shuffle indexes
    idx = idx[0:batch_size]  # use only `batch_size` random indexes
    if len(data.shape) == 2:
        data_shuffle = [data[i, :] for i in idx]  # get list of `batch_size` random samples
    else:
        data_shuffle = [data[i] for i in idx]  # get list of `batch_size` random samples
    data_shuffle = np.asarray(data_shuffle)  # get back numpy array
    return data_shuffle
def neural_network_model(data, weights, biases, keep_prob):
    layer1 = tf.add(tf.matmul(data, weights['h1']), biases['b1'])
    layer1 = tf.nn.sigmoid(layer1)
    output = tf.add(tf.matmul(layer1, weights['out']), biases['out'])
    return output
if reuse_weights:
    weights = {
        'h1': tf.Variable(sio.loadmat('weights_h1.mat')['weights_h1'], name="weights_h1"),
        'out': tf.Variable(sio.loadmat('weights_out.mat')['weights_out'], name="weights_out")
    }
    biases = {
        'b1': tf.Variable(sio.loadmat('biases_b1.mat')['biases_b1'], name="biases_b1"),
        'out': tf.Variable(sio.loadmat('biases_out.mat')['biases_out'], name="biases_out")
    }
else:  # initialize weights
    weights = {
        'h1': tf.Variable(tf.random_normal([input_length - 1, n_nodes_hl1]), name="weights_h1"),
        'out': tf.Variable(tf.random_normal([n_nodes_hl1, 1]), name="weights_out")
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_nodes_hl1]), name="biases_b1"),
        'out': tf.Variable(tf.random_normal([1]), name="biases_out")
    }
def train_neural_network(x):
    prediction = neural_network_model(x, weights, biases, keep_prob)[:, 0]
    cost = tf.reduce_mean(tf.abs(prediction - y))
    optimizer = tf.train.AdamOptimizer()
    opt = optimizer.minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(weights['h1'])
        for epoch in range(hm_epochs):  # training
            epoch_loss = 0
            for _ in range(int(n_measurements/batch_size)):
                _, c, p = sess.run([opt, cost, prediction],
                                   feed_dict={x: next_batch(data_x), y: next_batch(data_y), keep_prob: 1.0})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'Average loss:', epoch_loss/int(n_measurements/batch_size))
        # prediction
        accuracy = tf.reduce_mean(tf.abs(prediction - y))
        # Feed 1.0 for keep prob during testing
        print("Training data accuracy:", accuracy.eval({x: data_x, y: data_y, keep_prob: 1.0}))
        print("Training data predictions:", prediction.eval({x: data_x[0:5, :], keep_prob: 1.0}))
        print("Training data:", data_y[0:5])
        # print("Test data accuracy:", accuracy.eval({x: test_data_x, y: test_data_y, keep_prob: 1.0}))
        # save numpy arrays
        sio.savemat('weights_h1.mat', {'weights_h1': weights['h1'].eval()})
        sio.savemat('biases_b1.mat', {'biases_b1': biases['b1'].eval()})
        sio.savemat('weights_out.mat', {'weights_out': weights['out'].eval()})
        sio.savemat('biases_out.mat', {'biases_out': biases['out'].eval()})
train_neural_network(x)
Figured it out: the problem was with the data shuffling. The inputs and responses were shuffled independently (next_batch is called twice, each with its own random shuffle), so the input rows in each batch did not correspond to their response values.
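For anyone hitting the same issue, a minimal sketch of a paired batch helper (a rewrite for illustration, not the original code): draw one random permutation of indices and apply it to both arrays, so each input row stays aligned with its response.
def next_batch_pair(data_x, data_y, batch_size):
    # draw ONE set of random indexes and index both arrays with it
    idx = np.random.permutation(len(data_x))[:batch_size]
    return data_x[idx], data_y[idx]
# usage inside the training loop:
# batch_x, batch_y = next_batch_pair(data_x, data_y, batch_size)
# _, c, p = sess.run([opt, cost, prediction], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})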

TensorFlow convergence is stuck at a high value

I was playing around with TensorFlow and I was looking at the tutorial from:
https://github.com/aymericdamien/TensorFlow-Examples/tree/0.11/examples/3_NeuralNetworks
Because I did not want to use the MNIST database, I changed the script to use some data I created, with 8000 training samples. The evaluation is done on 300 test samples. The output is a binary classification. Bear in mind that I have only just dived into machine learning and my knowledge is quite limited for now.
The script works fine, however my cost is stuck at a very high value and does not converge to 0. First, is that normal? How can I improve this? Did I do something wrong?
Second, the accuracy is not very good either; is that due to the bad convergence? Maybe 8000 samples are not enough to train the model, or the values are too scattered to get better accuracy.
I found a similar problem here:
tensorflow deep neural network for regression always predict same results in one batch
but I do not understand why or how this problem applies to me.
Could someone help me please?
Here is the output:
Starting 1st session...
Epoch: 0001 cost= 39926820.730
and at the end I get:
Epoch: 0671 cost= 64.798
Epoch: 0681 cost= 64.794
Epoch: 0691 cost= 64.791
Optimization Finished!
Accuracy: 0.716621
The code is as follows:
import tensorflow as tf
import pandas as pd
import numpy as np
import csv
inputData = pd.read_csv('./myInputDataNS.csv', header=None)
runData = pd.read_csv('./myTestDataNS.csv', header=None)
trX, trY = inputData.iloc[:, :7].values, inputData.iloc[:,7].values
temp = trY.shape
trY = trY.reshape(temp[0], 1)
trY = np.concatenate((1-trY, trY), axis=1)
teX, teY = runData.iloc[:, :7].values, runData.iloc[:, 7].values
temp = teY.shape
teY = teY.reshape(temp[0], 1)
teY = np.concatenate((1-teY, teY), axis=1)
# Parameters
learning_rate = 0.001
training_epochs = 700
batch_size = 100
display_step = 10
# Network Parameters
n_hidden_1 = 320
n_hidden_2 = 320
n_hidden_3 = 320
n_input = 7
n_classes = 2 # (0 or 1)
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
def multilayer_perceptron(x, weights, biases):
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)
    out_layer = tf.matmul(layer_3, weights['out']) + biases['out']
    return out_layer
weights = {
    'h1': tf.Variable(tf.random_normal([len(trX[0]), n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'h3': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_3])),
    'out': tf.Variable(tf.random_normal([n_hidden_3, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = multilayer_perceptron(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()
print("Starting 1st session...")
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        epoch_loss = 0
        i = 0
        while i < len(trX):
            start = i
            end = i + batch_size
            batch_x = np.array(trX[start:end])
            batch_y = np.array(trY[start:end])
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
            epoch_loss += c
            i += batch_size
        epoch_loss += c / len(trX[0])
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.3f}".format(epoch_loss))
    print("Optimization Finished!")
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: teX, y: teY}))