I have written tensorflow code based on:
http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
but using precomputed word embeddings from the 300-dimensional GoogleNews word2vec model.
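For reference, loading the pretrained vectors looks roughly like this (a sketch with an illustrative file path, not my exact code):
from gensim.models import KeyedVectors
# Load the 300-dimensional GoogleNews vectors (path is illustrative)
w2v = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
vec = w2v['news']  # a 300-dimensional numpy vector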
I created my own data from the UCI ML News Aggregator Dataset by parsing the content of the news articles and creating my own labels.
Due to the size of the articles I use TF-IDF to select the top 120 words per article and embed those into 300 dimensions.
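The selection step is roughly the following (a scikit-learn sketch for illustration only; my actual pipeline uses gensim and appears near the end of this post):
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

def top_n_embeddings(doc, vectorizer, w2v, n=120, dim=300):
    # Score every vocabulary word for this document and keep the n highest
    scores = vectorizer.transform([doc]).toarray()[0]
    vocab = np.array(vectorizer.get_feature_names())
    top_words = vocab[np.argsort(scores)[::-1][:n]]
    # Stack the word2vec embeddings of the selected words into an (n, dim) matrix
    mat = np.zeros((n, dim))
    for i, w in enumerate(top_words):
        if w in w2v:
            mat[i] = w2v[w]
    return mat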
When I run the CNN I created, it converges to a low overall accuracy of around 38% regardless of the hyperparameters.
Hyperparameters changed:
Various filter sizes:
I've tried single filter sizes of 1, 2, and 3,
and combinations of filter sizes [3,4,5] and [1,3,4].
Learning Rate:
I've varied this from very low to very high; very low rates don't even reach 38%, but anything between 0.0001 and 0.4 converges to it.
Batch Size:
Tried many values between 5 and 100.
Weight and Bias Initialization:
Set stddev of weights between 0.4 and 0.01.
Set bias initial values between 0 and 0.1.
Tried using the Xavier initializer for the conv2d weights (see the sketch after this list).
Dataset Size:
I have only tried two partial data sets: one with 15,000 training examples and one with the 5,000 test examples. In total I have 263,000 examples available for training. There is no accuracy difference whether I train and evaluate on the 15,000 training examples or use the 5,000 test examples as the training data (to save testing time).
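For reference, the Xavier variant I tried looked roughly like this (a sketch, not my exact code):
W = tf.get_variable(
    "W_conv%d" % filter_size,
    shape=[filter_size, embedding_size, 1, num_filters],
    initializer=tf.contrib.layers.xavier_initializer())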
I've run successful classifications on the 15,000 / 5,000 split using a feed-forward network with BoW input (93% accurate), TF-IDF with SVM (92%), and TF-IDF with Naive Bayes (91.5%). So I don't think it is the data.
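For example, the TF-IDF + SVM baseline was along these lines (a rough sketch; train_texts / train_labels / test_texts / test_labels stand in for my parsed articles and labels):
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline

baseline = make_pipeline(TfidfVectorizer(), LinearSVC())
baseline.fit(train_texts, train_labels)
print(baseline.score(test_texts, test_labels))  # roughly 0.92 on the 15,000 / 5,000 split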
What does this imply? Is the model just a poor model for this task? Is there an error in my work?
I feel like my do_eval function is incorrect to evaluate the accuracy / loss over an epoch of the data:
def do_eval(data_set,
label_set,
batch_size):
"""
Runs one evaluation against the full epoch of data.
data_set: The set of embeddings to eval
label_set: the set of labels to eval
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = len(label_set) // batch_size
num_examples = steps_per_epoch * batch_size
totalLoss = 0
# Need to compute eval accuracy
for evalStep in xrange(steps_per_epoch):
input_batch, label_batch = nextBatch(data_set, label_set, batch_size)
evalAcc, evalLoss = eval_step(input_batch, label_batch)
true_count += evalAcc * batch_size
totalLoss += evalLoss
precision = float(true_count) / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' % (num_examples, true_count, precision))
print("Eval Loss: " + str(totalLoss))
The entire model is as follows:
class TextCNN(object):
"""
A CNN for text classification
Uses a convolutional, max-pooling and softmax layer.
"""
def __init__(
self, batchSize, numWords, num_classes,
embedding_size, filter_sizes, num_filters):
# Set place holders
self.input_placeholder = tf.placeholder(tf.float32,[batchSize,numWords,embedding_size,1])
self.labels = tf.placeholder(tf.int32, [batchSize,num_classes])
self.pKeep = tf.placeholder(tf.float32)
# Inference
'''
Ready to build conv layers followed by max pooling layers
Each conv layer produces a differently shaped output, so we need to loop over
the filter sizes, create a layer for each, and then merge the results
'''
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
# W: Filter matrix
W = tf.Variable(tf.truncated_normal(filter_shape,stddev=0.01), name='W')
b = tf.Variable(tf.constant(0.0,shape=[num_filters]),name="b")
# Valid padding: Narrow convolution (no edge padded so filter slides over everything)
# Output size = (input_size (numWords in this case) + 2 * padding (0 in this case) - filter_size) + 1
conv = tf.nn.conv2d(
self.input_placeholder,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Add the bias b to the convolution output Wx (the conv layer above)
# and then run it through the ReLU activation function
h = tf.nn.relu(tf.nn.bias_add(conv, b),name='relu')
# Max-pooling over the outputs
# Max-pool over the whole output to keep only the strongest activation per filter
# ksize is the size of the pooling window over the input tensor
pooled = tf.nn.max_pool(
h,
ksize=[1, numWords - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
# Each pooled output is a tensor of shape
# [batchSize, 1, 1, num_filters], where num_filters is the
# number of features we wanted pooled
pooled_outputs.append(pooled)
# Combine all pooled features
num_filters_total = num_filters * len(filter_sizes)
# Concatenate the pooled outputs along the last (num_filters / feature) dimension
self.h_pool = tf.concat(pooled_outputs, 3)
# Flatten
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Apply dropout for regularization
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat,self.pKeep)
# Fully connected output layer
with tf.name_scope("output"):
W = tf.Variable(tf.truncated_normal([num_filters_total,num_classes],stddev=0.01),name="W")
b = tf.Variable(tf.constant(0.0,shape=[num_classes]), name='b')
self.logits = tf.nn.xw_plus_b(self.h_drop, W, b, name='logits')
self.predictions = tf.argmax(self.logits, 1, name='predictions')
# Loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.labels,logits=self.logits, name="xentropy")
self.loss = tf.reduce_mean(losses)
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.labels,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
##################################################################################################################
# Running the training
# Define various parameters for network
batchSize = 100
numWords = 120
embedding_size = 300
num_classes = 4
filter_sizes = [3,4,5] # slide over a number of words at a time, i.e. 3 words, 4 words, etc.
num_filters = 126
maxSteps = 5000
initial_learning_rate = 0.001
dropoutRate = 1
data_set = np.load("/home/kevin/Documents/NSERC_2017/articles/classifyDataSet/TestSmaller_CNN_inputMat_0.npy")
labels_set = np.load("Test_NN_target_smaller.npy")
with tf.Graph().as_default():
sess = tf.Session()
with sess.as_default():
cnn = TextCNN(batchSize=batchSize,
numWords=numWords,
num_classes=num_classes,
num_filters=num_filters,
embedding_size=embedding_size,
filter_sizes=filter_sizes)
# Define training operation
# Pick an optimizer, set its learning rate, and tell it what to minimize
global_step = tf.Variable(0,name='global_step', trainable=False)
optimizer = tf.train.AdamOptimizer(initial_learning_rate)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Summaries to save for tensor board
# Set directory
out_dir = "/home/kevin/Documents/NSERC_2017/articles/classifyDataSet/tf_logs/CNN_Embedding/"
# Loss and accuracy summaries
loss_summary = tf.summary.scalar("loss",cnn.loss)
acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary,acc_summary])
train_summary_dir = out_dir + "train/"
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Test summaries
test_summary_op = tf.summary.merge([loss_summary, acc_summary])
test_summary_dir = out_dir + "test/"
test_summary_write = tf.summary.FileWriter(test_summary_dir, sess.graph)
# Init all variables
init = tf.global_variables_initializer()
sess.run(init)
############################################################################################
def train_step(input_data, labels_data):
'''
Single training step
:param input_data: input
:param labels_data: labels to train to
'''
feed_dict = {
cnn.input_placeholder: input_data,
cnn.labels: labels_data,
cnn.pKeep: dropoutRate
}
_, step, summaries, loss, accuracy = sess.run(
[train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
feed_dict=feed_dict)
train_summary_writer.add_summary(summaries, step)
###############################################################################################
def eval_step(input_data, labels_data, writer=None):
"""
Evaluates model on a test set
Single step
"""
feed_dict = {
cnn.input_placeholder: input_data,
cnn.labels: labels_data,
cnn.pKeep: 1.0
}
step, summaries, loss, accuracy = sess.run(
[global_step, test_summary_op, cnn.loss, cnn.accuracy],
feed_dict)
if writer:
writer.add_summary(summaries, step)
return accuracy, loss
###############################################################################
def nextBatch(data_set, labels_set, batchSize):
'''
Get the next batch of data
:param data_set: entire training or test data set
:param labels_set: entire training or test label set
:param batchSize: batch size
:return: a batch of the data and its corresponding labels
'''
# Generate random row indices for the documents
rand_index = np.random.choice(data_set.shape[0], size=batchSize)
# Grab the data to give to the feed dicts
data_batch, labels_batch = data_set[rand_index, :, :], labels_set[rand_index, :]
# Resize for tensorflow
data_batch = data_batch.reshape([data_batch.shape[0],data_batch.shape[1],data_batch.shape[2],1])
return data_batch, labels_batch
################################################################################
def do_eval(data_set,
label_set,
batch_size):
"""
Runs one evaluation against the full epoch of data.
data_set: The set of embeddings to eval
label_set: the set of labels to eval
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = len(label_set) // batch_size
num_examples = steps_per_epoch * batch_size
totalLoss = 0
# Need to compute eval accuracy
for evalStep in xrange(steps_per_epoch):
input_batch, label_batch = nextBatch(data_set, label_set, batch_size)
evalAcc, evalLoss = eval_step(input_batch, label_batch)
true_count += evalAcc * batch_size
totalLoss += evalLoss
precision = float(true_count) / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' % (num_examples, true_count, precision))
print("Eval Loss: " + str(totalLoss))
######################################################################################################
# Training Loop
for step in range(maxSteps):
input_batch, label_batch = nextBatch(data_set,labels_set,batchSize)
train_step(input_batch,label_batch)
# Evaluate over the entire data set on last eval
if step % 100 == 0:
print "On Step : " + str(step) + " of " + str(maxSteps)
do_eval(data_set, labels_set,batchSize)
The embedding is done before the model:
def createInputEmbeddedMatrix(corpusPath, maxWords, svName):
# Create a [docNum, Words per Art, Embedding Size] matrix to fill
genDocsPath = "gen_docs_classifyData_smallerTest_TFIDF.npy"
# corpus = "newsCorpus_word2vec_All_Corpus.mm"
dictPath = 'news_word2vec_smallerDict.dict'
tf_idf_path = "news_tfIdf_word2vec_All.tfidf_model"
gen_docs = np.load(genDocsPath)
dictionary = gensim.corpora.dictionary.Dictionary.load(dictPath)
tf_idf = gensim.models.tfidfmodel.TfidfModel.load(tf_idf_path)
corpus = corpora.MmCorpus(corpusPath)
numOfDocs = len(corpus)
embedding_size = 300
id2embedding = np.load("smallerID2embedding.npy").item()
# Need to process in batches as it takes up a ton of memory
step = 5000
totalSteps = int(np.ceil(numOfDocs / step))
for i in range(totalSteps):
# inputMatrix = scipy.sparse.csr_matrix([step,maxWords,embedding_size])
inputMatrix = np.zeros([step, maxWords, embedding_size])
start = i * step
end = start + step
for docNum in range(start, end):
print "On docNum " + str(docNum) + " of " + str(numOfDocs)
# Extract the top N words
topWords, wordVal = tf_idfTopWords(docNum, gen_docs, dictionary, tf_idf, maxWords)
# doc = corpus[docNum]
# Need to track the word index and doc index separately
# (the doc index is batch-relative because of the batch processing)
wordDex = 0
docDex = 0
for wordID in wordVal:
inputMatrix[docDex, wordDex, :] = id2embedding[wordID]
wordDex += 1
docDex += 1
# Save the batch of input data
# scipy.sparse.save_npz(svName + "_%d" % i, inputMatrix)
np.save(svName + "_%d.npy" % i, inputMatrix)
#####################################################################################
Turns out my error was in the creation of the input matrix.
for i in range(totalSteps):
# inputMatrix = scipy.sparse.csr_matrix([step,maxWords,embedding_size])
inputMatrix = np.zeros([step, maxWords, embedding_size])
start = i * step
end = start + step
for docNum in range(start, end):
print "On docNum " + str(docNum) + " of " + str(numOfDocs)
# Extract the top N words
topWords, wordVal = tf_idfTopWords(docNum, gen_docs, dictionary, tf_idf, maxWords)
# doc = corpus[docNum]
# Need to track the word index and doc index separately
# (the doc index is batch-relative because of the batch processing)
wordDex = 0
docDex = 0
for wordID in wordVal:
inputMatrix[docDex, wordDex, :] = id2embedding[wordID]
wordDex += 1
docDex += 1
docDex should not have been reset to 0 on every iteration of the document loop; I was effectively overwriting the first row of my input matrix, and the rest stayed all zeros.
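For completeness, here is a corrected sketch of the batching loop (same variable names as above, with docDex initialized once per batch):
for i in range(totalSteps):
    inputMatrix = np.zeros([step, maxWords, embedding_size])
    start = i * step
    end = start + step
    docDex = 0  # row within the current batch; initialized once per batch
    for docNum in range(start, end):
        # Extract the top N words
        topWords, wordVal = tf_idfTopWords(docNum, gen_docs, dictionary, tf_idf, maxWords)
        wordDex = 0
        for wordID in wordVal:
            inputMatrix[docDex, wordDex, :] = id2embedding[wordID]
            wordDex += 1
        docDex += 1  # advance once per document instead of resetting it
    np.save(svName + "_%d.npy" % i, inputMatrix)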
Related
I've been walking through some TensorFlow tutorials and am cobbling together a pet experiment. However, I am running into some dimension errors and I can't seem to figure them out.
My goal: I have an input matrix of shape 1xN and a training set of dimension 10xN. (1 and 10 were chosen arbitrarily.) N is intended to represent N samples in a training set: one input value mapped to one vector of outputs. You can think of this as 1 input neuron and m output neurons. The training set is a set of these single values mapped to a 1-D vector. I wish to train the network by running the set of these mapped inputs and outputs against it and reducing the error.
The simple algorithm that I am trying to accomplish:
For each value in the input vector
Load the input neuron with that value
Feed forward
Evaluate against the corresponding vector
Repeat to minimize error.
However, I seem to be getting mixed up with how to format the data to feed to the network. I have a placeholder of 1 input neuron and one of n output neurons. I want to follow the above algorithm, but I am not sure if I am doing it right:
# Data parameters
num_frames = 10
stimuli_value_low = .00001
stimuli_value_high = 100
pixel_value_low = .00001
pixel_value_high = 256.0
stimuli_dimension = 1
frame_dimension = 10
stimuli = np.random.uniform(stimuli_value_low, stimuli_value_high, (stimuli_dimension, num_frames))
frames = np.random.uniform(pixel_value_low, pixel_value_high, (frame_dimension, num_frames))
# Parameters
learning_rate = 0.01
training_iterations = 1000
display_iteration = 10
# Network Parameters
n_hidden_1 = 100
n_hidden_2 = 100
num_input_neurons = stimuli_dimension
num_output_neurons = frame_dimension
# Create placeholders
input_placeholder = tf.placeholder("float", [None, num_input_neurons])
output_placeholder = tf.placeholder("float", [None, num_output_neurons])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([num_input_neurons, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, num_output_neurons]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([num_output_neurons]))
}
# Create model
def neural_net(input_placeholder):
# Hidden fully connected layer
layer_1 = tf.add(tf.matmul(input_placeholder, weights['h1']), biases['b1'])
# Hidden fully connected layer
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
# Output fully connected layer with a neuron for each pixel
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Construct model
logits = neural_net(input_placeholder)
# Define loss operation and optimizer
loss_operation = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = output_placeholder))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
train_operation = optimizer.minimize(loss_operation)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(output_placeholder, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start Training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
for step in range(1, training_iterations + 1):
sess.run(train_operation, feed_dict = {X: stimuli, Y: frames})
if iteration % display_iteration == 0 or iteration == 1:
loss, accuracy = sess.run([loss_operation, accuracy_operation], feed_dict = {X: stimuli, Y: frames})
print("Step " + str(iteration) +
", Loss = " + "{:.4f}".format(loss) +
", Training Accuracy= " + \
"{:.3f}".format(acc))
print("Optimization finished!")
I think it has something to do with how I am structuring my data or feeding it to the run function.
Here is the error I am getting:
ValueError Traceback (most recent call last)
<ipython-input-420-7517598734d6> in <module>()
6 for step in range(1, training_iterations + 1):
7
----> 8 sess.run(train_operation, feed_dict = {X: stimuli, Y: frames})
9
10 if iteration % display_iteration == 0 or iteration == 1:
1 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1147 'which has shape %r' %
1148 (np_val.shape, subfeed_t.name,
-> 1149 str(subfeed_t.get_shape())))
1150 if not self.graph.is_feedable(subfeed_t):
1151 raise ValueError('Tensor %s may not be fed.' % subfeed_t)
ValueError: Cannot feed value of shape (1, 10) for Tensor 'Placeholder_6:0', which has shape '(?, 1)'
How can I ensure I am formatting my input data correctly and forming my network correspondingly?
Turns out I had the dimensions of the arrays I was generating backwards:
stimuli = np.random.uniform(stimuli_value_low, stimuli_value_high, (stimuli_dimension, num_frames))
frames = np.random.uniform(pixel_value_low, pixel_value_high, (frame_dimension, num_frames))
should be:
stimuli = np.random.uniform(stimuli_value_low, stimuli_value_high, (num_frames, stimuli_dimension))
frames = np.random.uniform(pixel_value_low, pixel_value_high, (num_frames, frame_dimension))
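A quick sanity check before feeding the session (illustrative only, not part of the original code):
assert stimuli.shape == (num_frames, stimuli_dimension)  # (10, 1) matches the (?, 1) input placeholder
assert frames.shape == (num_frames, frame_dimension)     # (10, 10) matches the (?, 10) output placeholder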
I am trying to train on a dataset of 10,000+ images using TensorFlow GPU (GTX 1060 Max-Q, 6 GB). Because the images in my dataset are large (512 x 424 pixels), I get a MemoryError.
Traceback (most recent call last):
File "train.py", line 33, in <module>
data = dataset.read_train_sets(train_path, img_size, classes, validation_size=validation_size)
File "/home/nabeel/tf-realsense-gesture/dataset.py", line 103, in read_train_sets
images, labels, img_names, cls = shuffle(images, labels, img_names, cls)
File "/home/nabeel/anaconda3/envs/tensorflow/lib/python2.7/site-packages/sklearn/utils/__init__.py", line 403, in shuffle
return resample(*arrays, **options)
File "/home/nabeel/anaconda3/envs/tensorflow/lib/python2.7/site-packages/sklearn/utils/__init__.py", line 327, in resample
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
File "/home/nabeel/anaconda3/envs/tensorflow/lib/python2.7/site-packages/sklearn/utils/__init__.py", line 216, in safe_indexing
return X.take(indices, axis=0)
MemoryError
The problem with my code is that I am training all seven classes at the same time, which is why I get the memory error. I want to process a single class at a time.
I have tried to implement a while/for loop inside, but every time a loop finishes, the .meta file is overwritten and the model only works on one class. Is there any way to train multiple classes at a time, or one by one?
train.py
batch_size = 1
# 7 classes for recognition
#classes = ['up']
classes = ['up','down','left','right','forward','backward','none']
#classes = ['up','down','left','right','forward','backward','none']
num_classes = len(classes)
# 20% of the data will automatically be used for validation
validation_size = 0.2
img_size = 200
num_channels = 3
train_path='training_data'
# load all the training and validation images and labels into memory
data = dataset.read_train_sets(train_path, img_size, classes, validation_size=validation_size)
print("Complete reading input data. Will Now print a snippet of it")
print("Number of files in Training-set:\t\t{}".format(len(data.train.labels)))
print("Number of files in Validation-set:\t{}".format(len(data.valid.labels)))
session = tf.Session()
x = tf.placeholder(tf.float32, shape=[batch_size,img_size,img_size,num_channels], name='x')
# labels
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)
#Network graph params
filter_size_conv1 = 3
num_filters_conv1 = 32
filter_size_conv2 = 3
num_filters_conv2 = 32
filter_size_conv3 = 3
num_filters_conv3 = 64
filter_size_conv4 = 3
num_filters_conv4 = 128
filter_size_conv5 = 3
num_filters_conv5 = 256
filter_size_conv6 = 3
num_filters_conv6 = 512
filter_size_conv7 = 3
num_filters_conv7= 1024
fc_layer_size = 2048
def create_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def create_biases(size):
return tf.Variable(tf.constant(0.05, shape=[size]))
def create_convolutional_layer(input,num_input_channels,conv_filter_size,num_filters):
# define the weights that will be trained
weights = create_weights(shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
# create biases
biases = create_biases(num_filters)
# Create the convolutional layer
layer = tf.nn.conv2d(input=input,filter=weights,strides=[1, 1, 1, 1],padding='SAME')
layer += biases
# max-pooling
layer = tf.nn.max_pool(value=layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Relu is the activation function
layer = tf.nn.relu(layer)
return layer
def create_flatten_layer(layer):
layer_shape = layer.get_shape()
num_features = layer_shape[1:4].num_elements()
# Flatten the layer so reshape to num_features
layer = tf.reshape(layer, [-1, num_features])
return layer
def create_fc_layer(input,
num_inputs,
num_outputs,
use_relu=True):
# define trainable weights and biases.
weights = create_weights(shape=[num_inputs, num_outputs])
biases = create_biases(num_outputs)
# Fully connected layer
layer = tf.matmul(input, weights) + biases
if use_relu:
layer = tf.nn.relu(layer)
return layer
layer_conv1 = create_convolutional_layer(input=x,num_input_channels=num_channels,conv_filter_size=filter_size_conv1,
num_filters=num_filters_conv1)
layer_conv2 = create_convolutional_layer(input=layer_conv1,
num_input_channels=num_filters_conv1,
conv_filter_size=filter_size_conv2,
num_filters=num_filters_conv2)
layer_conv3= create_convolutional_layer(input=layer_conv2,
num_input_channels=num_filters_conv2,
conv_filter_size=filter_size_conv3,
num_filters=num_filters_conv3)
layer_conv4= create_convolutional_layer(input=layer_conv3,
num_input_channels=num_filters_conv3,
conv_filter_size=filter_size_conv4,
num_filters=num_filters_conv4)
layer_conv5= create_convolutional_layer(input=layer_conv4,
num_input_channels=num_filters_conv4,
conv_filter_size=filter_size_conv5,
num_filters=num_filters_conv5)
layer_conv6= create_convolutional_layer(input=layer_conv5,
num_input_channels=num_filters_conv5,
conv_filter_size=filter_size_conv6,
num_filters=num_filters_conv6)
layer_conv7= create_convolutional_layer(input=layer_conv6,
num_input_channels=num_filters_conv6,
conv_filter_size=filter_size_conv7,
num_filters=num_filters_conv7)
layer_flat = create_flatten_layer(layer_conv7)
layer_fc1 = create_fc_layer(input=layer_flat,num_inputs=layer_flat.get_shape()[1:4].num_elements(),num_outputs=fc_layer_size,
use_relu=True)
layer_fc2 = create_fc_layer(input=layer_fc1, num_inputs=fc_layer_size,num_outputs=num_classes, use_relu=False)
y_pred = tf.nn.softmax(layer_fc2,name='y_pred')
y_pred_cls = tf.argmax(y_pred, dimension=1)
session.run(tf.global_variables_initializer())
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session.run(tf.global_variables_initializer())
def show_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
acc = session.run(accuracy, feed_dict=feed_dict_train)
val_acc = session.run(accuracy, feed_dict=feed_dict_validate)
msg = "Training Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%}, Validation Loss: {3:.3f}"
print(msg.format(epoch + 1, acc, val_acc, val_loss))
total_iterations = 0
saver = tf.train.Saver()
def train(num_iteration):
global total_iterations
for i in range(total_iterations,total_iterations + num_iteration):
x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)
feed_dict_tr = {x: x_batch,y_true: y_true_batch}
feed_dict_val = {x: x_valid_batch,y_true: y_valid_batch}
session.run(optimizer, feed_dict=feed_dict_tr)
if i % int(data.train.num_examples/batch_size) == 0:
val_loss = session.run(cost, feed_dict=feed_dict_val)
epoch = int(i / int(data.train.num_examples/batch_size))
show_progress(epoch, feed_dict_tr, feed_dict_val, val_loss)
saver.save(session, '/home/nabeel/tf-realsense-gesture/')
total_iterations += num_iteration
train(num_iteration=6000)
Since you are facing an out-of-memory issue with CNNs, you can try the steps below:
Increase the strides of the convolutional layer, i.e., instead of using Sh = 1 and Sw = 1, use Sh = 2 and Sw = 2. This reduces the spatial dimensionality of the image and hence the RAM consumption. Code for this is shown below:
layer = tf.nn.conv2d(input=input,filter=weights,strides=[1, 2, 2, 1],padding='SAME')
Verify whether you really require 7 convolutional layers. You can try fewer convolutional layers (4, 5, or 6) and check the performance, because each convolutional layer and its filters adds to the memory usage.
Replace tf.float32 with tf.float16 and check whether it works without any error.
Use an Inception module instead of a plain convolutional layer; a rough sketch is shown below.
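A minimal sketch of such an Inception-style block (illustrative only; the function and argument names are made up, not taken from your code):
def inception_block(inputs, in_channels, out_1x1, out_3x3, out_5x5, out_pool):
    # Helper: convolution + bias + ReLU with SAME padding
    def conv(x, k, n_in, n_out):
        w = tf.Variable(tf.truncated_normal([k, k, n_in, n_out], stddev=0.05))
        b = tf.Variable(tf.constant(0.05, shape=[n_out]))
        return tf.nn.relu(tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') + b)
    branch_1x1 = conv(inputs, 1, in_channels, out_1x1)
    branch_3x3 = conv(inputs, 3, in_channels, out_3x3)
    branch_5x5 = conv(inputs, 5, in_channels, out_5x5)
    pooled = tf.nn.max_pool(inputs, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
    branch_pool = conv(pooled, 1, in_channels, out_pool)
    # Concatenate all branches along the channel dimension
    return tf.concat([branch_1x1, branch_3x3, branch_5x5, branch_pool], axis=3)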
I am encapsulating an autoencoder cost calculation so that it can be used with swarm algorithms. The goal is to get the cost of the autoencoder by sending a few parameters, so the method creates a model, trains it, and returns its cost.
def getAECost(dfnormalized, adamParam, iterations):
N_VISIBLE = 31
N_HIDDEN = 20
DEVICE = '/gpu:0' #Or '/cpu:0'
ITERATIONS = 1 + iterations
with tf.device(DEVICE):
# create node for input data (as I understand it: any number of rows, N_VISIBLE columns)
X = tf.placeholder("float", [None, N_VISIBLE], name='X')
# create nodes for hidden variables
W_init_max = 4 * np.sqrt(6. / (N_VISIBLE + N_HIDDEN))
W_init = tf.random_uniform(shape=[N_VISIBLE, N_HIDDEN])#,
# minval=-W_init_max,
# maxval=W_init_max)
# Initialize our weights and bias
#W [784,500]
W = tf.Variable(W_init, name='W')
# Initialize only the bias of the hidden layer
b = tf.Variable(tf.zeros([N_HIDDEN]), name='b')
#W_prime[500,784]
W_prime = tf.transpose(W) # tied weights between encoder and decoder
b_prime = tf.Variable(tf.zeros([N_VISIBLE]), name='b_prime')
#model that take our variables parameters
# Behavior of the neural network
def model(X, W, b, W_prime, b_prime):
tilde_X = X
#To decode ?
Y = tf.nn.sigmoid(tf.matmul(tilde_X, W) + b) # hidden state
# to reconstruct the input
Z = tf.nn.sigmoid(tf.matmul(Y, W_prime) + b_prime) # reconstructed input
return Z
# build model graph
pred = model(X, W, b, W_prime, b_prime)
# create cost function
#Sum of squared error
cost = tf.reduce_sum(tf.pow(X - pred, 2)) # minimize squared error
#Tensor to parameter learning rate
learning = tf.placeholder("float", name='learning')
train_op = tf.train.AdamOptimizer(learning).minimize(cost) # construct an optimizer
with tf.Session() as sess:
# you need to initialize all variables
tf.global_variables_initializer()
RATIO = adamParam
for i in range(ITERATIONS):
# Prepare input (minibatch) to feed the autoencoder
input_ = dfnormalized
# train autoencoder
sess.run(train_op, feed_dict={X: input_, learning: RATIO})
#Save last epoch and test
if(i == ITERATIONS-1):
# Get the output after training (Z is an array; we cast it to a list to append it to a dataframe)
costAE = sess.run(cost, feed_dict={X: input_})
return costAE
It worked a few days ago (maybe I had another session in the background), with the method returning a float, but now it is not working and I get the initialization error
FailedPreconditionError: Attempting to use uninitialized value W
[[{{node W/read}}]]
in the training step
sess.run(train_op, feed_dict={X: input_, learning: RATIO})
Any advice on how this initialization problem can be solved, or on how I can encapsulate a TensorFlow model and session?
Thanks
You have to actually run the variables initializer: tf.global_variables_initializer() returns an op to be executed, it does not run the initialization for you. So the solution to your problem is to replace the line
tf.global_variables_initializer()
with
sess.run(tf.global_variables_initializer())
I have tried what @Addy said and restructured the code to make it more legible, and now it works perfectly:
class Model:
N_VISIBLE = 31
N_HIDDEN = 20
DEVICE = '/gpu:0' #Or '/cpu:0'
with tf.device(DEVICE):
# create node for input data (as I understand it: any number of rows, N_VISIBLE columns)
X = tf.placeholder("float", [None, N_VISIBLE], name='X')
# create nodes for hidden variables
W_init_max = 4 * np.sqrt(6. / (N_VISIBLE + N_HIDDEN))
W_init = tf.random_uniform(shape=[N_VISIBLE, N_HIDDEN])#,
# minval=-W_init_max,
# maxval=W_init_max)
# Initialize our weights and bias
#W [784,500]
W = tf.Variable(W_init, name='W')
# Initialize only the bias of the hidden layer
b = tf.Variable(tf.zeros([N_HIDDEN]), name='b')
#W_prime[500,784]
W_prime = tf.transpose(W) # tied weights between encoder and decoder
b_prime = tf.Variable(tf.zeros([N_VISIBLE]), name='b_prime')
#model that take our variables parameters
# Behavior of the neural network
def model(X, W, b, W_prime, b_prime):
tilde_X = X
#To decode ?
Y = tf.nn.sigmoid(tf.matmul(tilde_X, W) + b) # hidden state
# to reconstruct the input
Z = tf.nn.sigmoid(tf.matmul(Y, W_prime) + b_prime) # reconstructed input
return Z
# build model graph
pred = model(X, W, b, W_prime, b_prime)
# create cost function
#Sum of squared error
cost = tf.reduce_sum(tf.pow(X - pred, 2)) # minimize squared error
#Tensor to parameter learning rate
learning = tf.placeholder("float", name='learning')
train_op = tf.train.AdamOptimizer(learning).minimize(cost) # construct an optimizer
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
def train (self, data, adamParam, iterations):
input_ = data
RATIO = adamParam
for i in range(iterations):
# train autoencoder
_= self.sess.run(self.train_op, feed_dict={self.X: input_, self.learning: RATIO})
#print ("Model trained")
def getAECost(self, data):
input_ = data
return self.sess.run(self.cost, {self.X: data})
def trainAndGetCost (self, dataTrain, dataCost, adamParam, iterations):
self.train(dataTrain, adamParam, iterations)
return self.getAECost(dataCost)
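A possible usage of the class then looks like this (the argument values are illustrative):
ae = Model()
cost = ae.trainAndGetCost(dfnormalized, dfnormalized, adamParam=0.01, iterations=100)
print(cost)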
I have tried implementing dropout in TensorFlow.
I do know that dropout should be declared as a placeholder and that the keep_prob parameter should be different during training and testing. However, I have still almost broken my brain trying to figure out why the accuracy is so low with dropout. With keep_prob = 1, the train accuracy is 99% and the test accuracy is 85%; with keep_prob = 0.5, both train and test accuracy are 16%. Any ideas where to look, anyone? Thank you!
def forward_propagation(X, parameters, keep_prob):
"""
Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
Z1 = tf.add(tf.matmul(W1,X),b1) # Z1 = np.dot(W1, X) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
A1 = tf.nn.dropout(A1,keep_prob) # apply dropout
Z2 = tf.add(tf.matmul(W2,A1),b2) # Z2 = np.dot(W2, a1) + b2
A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
A2 = tf.nn.dropout(A2,keep_prob) # apply dropout
Z3 = tf.add(tf.matmul(W3,A2),b3) # Z3 = np.dot(W3,A2) + b3
return Z3
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, lambd = 0.03, train_keep_prob = 0.5,
num_epochs = 800, minibatch_size = 32, print_cost = True):
"""
Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
Arguments:
X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
X_test -- test set, of shape (input size = 12288, number of test examples = 120)
Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
learning_rate -- learning rate of the optimization
lambd -- L2 regularization hyperparameter
train_keep_prob -- probability of keeping a neuron in hidden layer for dropout implementation
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep consistent results
seed = 3 # to keep consistent results
(n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)
n_y = Y_train.shape[0] # n_y : output size
costs = [] # To keep track of the cost
# Create Placeholders of shape (n_x, n_y)
X, Y = create_placeholders(n_x, n_y)
keep_prob = tf.placeholder(tf.float32)
# Initialize parameters
parameters = initialize_parameters()
# Forward propagation: Build the forward propagation in the tensorflow graph
Z3 = forward_propagation(X, parameters, keep_prob)
# Cost function: Add cost function to tensorflow graph
cost = compute_cost(Z3, Y, parameters, lambd)
# Backpropagation.
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
# Initialize all the variables
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
epoch_cost = 0. # Defines a cost related to an epoch
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the "optimizer" and the "cost"; the feed_dict should contain a minibatch for (X, Y).
_ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y, keep_prob: train_keep_prob})
epoch_cost += minibatch_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 100 == 0:
print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(epoch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# lets save the parameters in a variable
parameters = sess.run(parameters)
print ("Parameters have been trained!")
# Calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train, keep_prob: 1.0}))
print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test, keep_prob: 1.0}))
return parameters
The algorithm is correct. It is just that keep_prob = 0.5 is too low.
Managed to get 87% accuracy on the test set with the following hyperparameters:
learning_rate = 0.00002, lambd = 0.03, train_keep_prob = 0.90, num_epochs = 1500, minibatch_size = 32,
In the first case your model was overfitting the data, hence the large difference between the train and test accuracy. Dropout is a regularization technique that reduces the variance of the model by damping the effect of particular nodes, and hence prevents overfitting. But keeping keep_prob = 0.5 (too low) weakens the model, so it underfits the data severely, giving an accuracy as low as 16%. You should iterate by gradually decreasing the keep_prob value until you find a suitable one.
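As a concrete way to iterate, you can sweep keep_prob with the model() function from the question (the candidate values here are illustrative):
for kp in [0.95, 0.9, 0.8, 0.7]:
    print("Trying train_keep_prob = " + str(kp))
    parameters = model(X_train, Y_train, X_test, Y_test,
                       learning_rate=0.00002, lambd=0.03,
                       train_keep_prob=kp, num_epochs=1500)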
My neural network code ends up outputting vectors with NaNs most of the time. The code is given below:
from __future__ import division, print_function
from six.moves import xrange
import time
import os
from glob import glob
from zipfile import ZipFile, ZIP_DEFLATED
import numpy as np
import tensorflow as tf
## Defining variables which have to be provided by user
## Defining the number of units in the RNN. This is also the size of the word
## and document embeddings
num_units = 100
##The number of data elements in a batch
batch_size = 1
##The folder where the npz files with the numpy arrays are stored.
npz_files_folder = "npz_files"
## Name of the file to which we want the model to be saved
model_file = "rnn_trial"
## Number of labels sampled from the noise for NCE
num_sampled = 50
## The dropout probability for the NN
dropout = 0.2
## The learning rate for the optimizer
lr = 0.1
## The number of epochs
epochs = 10
## Reading in the list of npz files with vectors for each document
doc_files = sorted(glob(os.path.join(npz_files_folder, "*.npz")))
num_classes = num_docs = len(doc_files)
## The tensor for storing a batch of sentences where each sentence is a
## sequence of word embeddings. This is an input to the NN
sentences = tf.placeholder(tf.float32, [batch_size, None, num_units],
name='sentences')
## The tensor for storing a batch of documents where each document is a
## sequence of sentence embeddings. This is an input to the NN
documents = tf.placeholder(tf.float32, [batch_size, None, num_units])
## The tensor for storing the labels for each batch of documents
labels = tf.placeholder(tf.float32, [batch_size])
## Here we define the LSTM used in the first layer
sent_lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
sent_lstm = tf.contrib.rnn.DropoutWrapper(sent_lstm,
output_keep_prob=1.0-dropout)
## We define the initial_state of the LSTM in first layer here
initial_state_sent_lstm = sent_lstm.zero_state(batch_size, tf.float32)
## Here we get the outputs and states from the first layer
outputs_lstm, states_lstm = tf.nn.dynamic_rnn(sent_lstm,
inputs=sentences, initial_state=initial_state_sent_lstm)
## Here we define the forward GRU used in the second layer
doc_gru_fw = tf.contrib.rnn.GRUCell(num_units//2)
initial_state_doc_gru_fw = doc_gru_fw.zero_state(batch_size, tf.float32)
## Here we define the reverse GRU used in second layer.
doc_gru_bw = tf.contrib.rnn.GRUCell(num_units-num_units//2)
initial_state_doc_gru_bw = doc_gru_bw.zero_state(batch_size, tf.float32)
## Here we get the outputs and states from the second layer
outputs_gru, states_gru = tf.nn.bidirectional_dynamic_rnn(cell_fw=doc_gru_fw,
cell_bw=doc_gru_bw, initial_state_fw=initial_state_doc_gru_fw,
initial_state_bw=initial_state_doc_gru_bw,
inputs=documents)
# outputs_gru, states_gru = tf.nn.bidirectional_dynamic_rnn(cell_fw=doc_gru_fw,
# cell_bw=doc_gru_bw,
# inputs=documents, dtype=tf.float32)
## The final document embeddings
final_output = tf.reduce_mean(tf.concat(outputs_gru, 2), axis=1)
sigmoid_W = tf.Variable(
tf.truncated_normal([num_units, 1],
stddev=1.0/np.sqrt(num_units)))
sigmoid_b = tf.Variable(tf.zeros([1], dtype=tf.float32))
logits = tf.matmul(final_output, sigmoid_W) + sigmoid_b
y_ = (num_docs - 1) * tf.sigmoid(tf.reshape(logits, [-1]))
loss = tf.reduce_sum(tf.square(y_ - labels))
## Defining the training step
train = tf.train.AdamOptimizer(lr).minimize(loss)
## Initializing the session
sess = tf.Session()
## Initializing the variables
sess.run(tf.global_variables_initializer())
t = time.time()
for n in xrange(epochs):
result = False
for j, doc in enumerate(doc_files):
# if j==100:
# break
try:
npz_file = np.load(doc, allow_pickle=False)
except ValueError:
continue
train_label = np.array([j])
sent_files = sorted(npz_file.files)
temp_doc = np.array([])
temp_doc = np.reshape(temp_doc, (0, num_units))
for i, sent_file in enumerate(sent_files):
sent_input = np.reshape(npz_file[sent_file], (1, -1, num_units))
if 0 in sent_input.shape:
continue
output_1 = sess.run(outputs_lstm,
feed_dict={sentences: sent_input})
sent_embed = output_1[:, -1:]
temp_doc = np.concatenate([temp_doc] + list(sent_embed), 0)
## Training the model
temp_doc = np.array([temp_doc])
_, doc_vector = sess.run([train, final_output], feed_dict={
documents: temp_doc, labels: train_label})
if np.isnan(np.sum(doc_vector)):
result = True
print(result)
print("Finished with epoch ", n)
print()
doc_vecs_file_name = model_file + "_doc_vecs.zip"
with ZipFile(doc_vecs_file_name, 'w', ZIP_DEFLATED, True) as myzip:
for doc in doc_files:
# if doc_files.index(doc)==100:
# break
try:
npz_file = np.load(doc, allow_pickle=False)
except ValueError:
continue
sent_files = sorted(npz_file.files)
temp_doc = np.array([])
temp_doc = np.reshape(temp_doc, (0, num_units))
for i, sent_file in enumerate(sent_files):
sent_input = np.reshape(npz_file[sent_file], (1, -1, num_units))
if 0 in sent_input.shape:
continue
output_1 = sess.run(outputs_lstm,
feed_dict={sentences: sent_input})
sent_embed = output_1[:, -1:]
temp_doc = np.concatenate([temp_doc] + list(sent_embed), 0)
## Training the model
temp_doc = np.array([temp_doc])
doc_vec = sess.run(final_output, feed_dict={documents: temp_doc})
temp_file = doc.split(os.sep)[-1][:-4] + ".csv"
np.savetxt(temp_file, doc_vec, delimiter=',')
myzip.write(temp_file)
os.remove(temp_file)
saver = tf.train.Saver()
saver.save(sess, model_file)
print("Time taken = ", (time.time() - t))
If needed, I can upload a sample data set which you can use to try running the code yourself. With that sample data set, the training occasionally completes without any NaNs creeping in, but most of the time NaNs pop up during training.
I am using TensorFlow version 1.1.0 along with Python 2.7.13 from the Anaconda distribution.