I'm trying to create a model using TensorFlow. When I check the accuracy of the model, I always see 1.0.
I have tried multiple layers and still see the same accuracy, and I'm not sure where I need to tweak the model to correct this.
Below is the relevant part of my code.
# Placeholder
x = tf.placeholder(dtype=tf.float32, shape=[None, n_features])
y = tf.placeholder(dtype=tf.float32)

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([4, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    import pdb; pdb.set_trace()
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.squared_difference(prediction, y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 5
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Training the data
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for i in range(0, len(y_train) // batch_size):
                epoch_x, epoch_y = next_batch(batch_size, x_train, y_train)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y.reshape(-1, 1)})
                epoch_loss += c
                print('Completed %d' % i)
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        # Testing the data
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: x_test, y: y_test.reshape(-1, 1)}))

train_neural_network(x)
I appreciate your help. Thanks, Bhajesh
Can you check whether your prediction results (prediction_res) are the same as the labels (y)?
_, c, prediction_res = sess.run([optimizer, cost, prediction], feed_dict={x: epoch_x, y: epoch_y.reshape(-1, 1)})
If they are the same as y, then the model is generating the correct outputs for the data.
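For a quick numeric check, something like this should work (a sketch, assuming prediction_res comes back as a NumPy array and the labels are 0/1):

import numpy as np

# The squared-difference model produces continuous outputs, so round them
# before comparing with the labels.
match_rate = np.mean(np.round(prediction_res) == epoch_y.reshape(-1, 1))
print('Fraction of predictions matching labels:', match_rate)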
I identified the issue: I had not converted the labels to a one-hot representation. I believe that once the label variable is converted to one-hot, it will work properly; it seems the network (or at least the accuracy calculation) expects labels in one-hot form.
I'm still working on identifying the reason behind it. If anybody has any idea, please comment.
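For reference, a minimal sketch of the conversion (assuming y_train holds integer class ids and n_classes is defined elsewhere):

import numpy as np

# Row k of the identity matrix is the one-hot vector for class k.
y_train_onehot = np.eye(n_classes)[y_train.astype(int).reshape(-1)]

# Note: with a single-column y, tf.argmax(y, 1) is always 0, and if the
# prediction tensor also has a single column, tf.argmax(prediction, 1) is
# always 0 as well, so the accuracy comparison is trivially true. That
# would explain the constant 1.0.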
Related
This question is related to the one posted at the start of this thread.
The problem is to classify rows so that the classification of row number i can rely on the data for all the previous rows, including their class membership. The linked post contains an answer, which is reproduced below.
For the sake of experimentation, I've used a set of randomly crafted data where the classifying property is a uniform random variable over {0, 1}.
What strikes me is that the loss of the model in the example below is really low and the accuracy is 99%, whereas I would expect something in the 50% range.
So I am assuming that the way the model tests the classification is somehow leaking information.
Does anybody happen to see what the issue is? What would be the proper way to evaluate the accuracy in such a scenario?
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from random import randint

SIZE = 100

df = pd.DataFrame({'Temperature': list(range(SIZE)),
                   'Weight': [randint(1, 100) for _ in range(SIZE)],
                   'Size': [randint(1, 10000) for _ in range(SIZE)],
                   'Property': [randint(0, 1) for _ in range(SIZE)]})
df.Property = df.Property.shift(-1)
print(df.head())

# parameters
time_steps = 1
inputs = 3
outputs = 2

df = df.iloc[:-1, :]
df = df.values

train_X = df[:, :-1]
train_y = df[:, -1]

scaler = MinMaxScaler(feature_range=(0, 1))
train_X = scaler.fit_transform(train_X)
train_X = train_X[:, None, :]

onehot_encoder = OneHotEncoder()
encode_categorical = train_y.reshape(len(train_y), 1)
train_y = onehot_encoder.fit_transform(encode_categorical).toarray()

learning_rate = 0.001
epochs = 50000
batch_size = int(train_X.shape[0] / 2)
length = train_X.shape[0]
display = 100
neurons = 100

tf.reset_default_graph()

X = tf.placeholder(tf.float32, [None, time_steps, inputs])
y = tf.placeholder(tf.float32, [None, outputs])

cell = tf.contrib.rnn.BasicLSTMCell(num_units=neurons, activation=tf.nn.relu)
cell_outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)

stacked_outputs = tf.reshape(cell_outputs, [-1, neurons])
out = tf.layers.dense(inputs=stacked_outputs, units=outputs)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    labels=y, logits=out))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)

accuracy = tf.metrics.accuracy(labels=tf.argmax(y, 1),
                               predictions=tf.argmax(out, 1),
                               name="accuracy")
precision = tf.metrics.precision(labels=tf.argmax(y, 1),
                                 predictions=tf.argmax(out, 1),
                                 name="precision")
recall = tf.metrics.recall(labels=tf.argmax(y, 1),
                           predictions=tf.argmax(out, 1),
                           name="recall")
# F1 is the harmonic mean of precision and recall
f1 = 2 * precision[1] * recall[1] / (precision[1] + recall[1])

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    for steps in range(epochs):
        mini_batch = zip(range(0, length, batch_size),
                         range(batch_size, length + 1, batch_size))

        for (start, end) in mini_batch:
            sess.run(training_op, feed_dict={X: train_X[start:end, :, :],
                                             y: train_y[start:end, :]})

        if (steps + 1) % display == 0:
            loss_fn = loss.eval(feed_dict={X: train_X, y: train_y})
            print('Step: {} \tTraining loss: {}'.format(steps + 1, loss_fn))

    acc, prec, recall, f1 = sess.run([accuracy, precision, recall, f1],
                                     feed_dict={X: train_X, y: train_y})

    print('\nEvaluation on training set')
    print('Accuracy:', acc[1])
    print('Precision:', prec[1])
    print('Recall:', recall[1])
    print('F1 score:', f1)
I wanted to learn about PCA via SVD, so I implemented it and tried it out on the MNIST data.
import numpy as np

class PCA(object):
    def __init__(self, X):
        self.N, self.dim, *rest = X.shape
        self.X = X
        '''
        U S V' = svd(X)
        '''
        X_std = (X - np.mean(X, axis=0)) / (np.std(X, axis=0) + 1e-13)
        [self.U, self.s, self.Vt] = np.linalg.svd(X_std)
        self.V = self.Vt.T
        self.variance_ratio = self.s

    def variance_explained_ratio(self):
        '''
        Returns the cumulative variance captured with each added principal component
        '''
        return np.cumsum(self.variance_ratio) / np.sum(self.variance_ratio)

    def X_projected(self, r):
        '''
        Returns the data X projected along the first r principal components
        '''
        if r is None:
            r = self.dim
        P_reduce = self.V[:, 0:r]
        X_proj = self.X.dot(P_reduce)
        return X_proj
Now, with this implementation of PCA, I tried applying it to the MNIST data to see the classification performance with and without PCA, using softmax. The code for that is as follows:
import time
import random

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Using first 10000 images
train_data = mnist.train.images[:10000, :]
train_labels = mnist.train.labels[:10000, :]

pca1 = PCA(train_data)
pca_test = PCA(mnist.test.images)

n_components = 14
X_proj1 = pca1.X_projected(r=n_components)
X_projTest = pca_test.X_projected(r=n_components)

t1 = time.time()
x = tf.placeholder(tf.float32, [None, n_components])
W = tf.Variable(tf.zeros([n_components, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.cast(tf.nn.softmax(tf.matmul(x, W) + b), tf.float32)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y),
                                              reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.7).minimize(cross_entropy)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

m = 10000
for _ in range(1000):
    indices = random.sample(range(0, m), 100)
    batch_xs = X_proj1[indices]
    batch_ys = train_labels[indices]
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy = sess.run(accuracy, feed_dict={x: X_projTest, y_: mnist.test.labels})
print("Accuracy: %f" % accuracy)
sess.close()

t2 = time.time()
print("Total time taken: %f seconds" % (t2 - t1))
The accuracy I obtain with this is only around 19%, whereas training directly on train_data and train_labels gives more than 90%. Could someone suggest where I'm going wrong?
When we use PCA or feature scaling, we fit the underlying parameters on the training dataset and then just apply/transform the test dataset with them. The test dataset must not be used to calculate those parameters; in this case, the SVD should be computed on the training dataset alone.
For example, with sklearn's PCA we use the following code:
from sklearn.decomposition import PCA

pca = PCA(n_components=n_components)  # whatever number of components you want
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
Note that we fit on the training dataset, X_train, and only transform X_test with it.
Similarly, for the implementation above, there's no need to create the pca_test object. Tweak the X_projTest variable to:
X_projTest = mnist.test.images.dot(pca1.V[:, 0:n_components])
This should fix the low test accuracy.
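To make this harder to get wrong, one option is a small helper that always projects held-out data with the components learned from the training set (a sketch; project_with is a hypothetical name, reusing the PCA class and variables defined above):

def project_with(pca, X_new, r):
    '''Project new data onto the first r principal components that
    pca learned from its own training data.'''
    return X_new.dot(pca.V[:, 0:r])

# usage
pca1 = PCA(train_data)                      # fit the SVD on training images only
X_proj1 = pca1.X_projected(r=n_components)
X_projTest = project_with(pca1, mnist.test.images, n_components)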
I'm following the example of Sentdex on YouTube, and here is the code I have:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500

n_classes = 10
batch_size = 100

x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes])),}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
    # NEW:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10
    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train_neural_network(x)
It raises this error:
ValueError: Shapes must be equal rank, but are 2 and 1
From merging shape 0 with other shapes. for 'SparseSoftmaxCrossEntropyWithLogits/packed' (op: 'Pack') with input shapes: [?,10], [10].
on this line:
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )
I think the shape of y induced the error. I tried using
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(prediction, tf.squeeze(y)))
I'm pretty sure this means that the cost function induces the error shown above because prediction and y are not the same shape, but I don't understand TensorFlow well enough to know how to fix it. I don't even really understand where y is being set; I got most of this code from a tutorial and fiddled with it to apply it to a different dataset. How can I fix this error?
P.S. I tried to print out prediction; it gives me two outputs, and I guess that's where the error comes from:
prediction
(<tf.Tensor 'MatMul_39:0' shape=(?, 10) dtype=float32>,
 <tf.Variable 'Variable_79:0' shape=(10,) dtype=float32_ref>)
Since you are using one_hot=True while reading the input data, just define the correct shape for the y placeholder:
# redefine the label and input with exact data type and shape
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, shape=[None, n_classes])
You have a comma between the closing parenthesis and the closing brace of the dict in this statement:
output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])), 'biases': tf.Variable(tf.random_normal([n_classes])),}
just before the closing brace:
...([n_classes])),}
(Note that a trailing comma inside a dict literal is legal Python, so it won't cause an error by itself, but it is worth removing for clarity.)
# WORKING CODE
# I had the same problem as you (not counting the comma), and I'm sorry I don't
# remember exactly what I changed, but hopefully this will work.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# 10 classes, 0-9
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100

x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)  # was "li = tf.nn.relu(l1)", which silently skipped the activation
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train(x):
    prediction = neural(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 20
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train(x)
I created a simple TensorFlow program that tries to predict the next character using the previous 3 characters in a body of text.
A single input could look like this:
np.array(['t','h','i'])
with the target being
np.array(['s'])
I'm trying to expand this to output, say, the next 4 characters rather than just the next one. To do this, I tried feeding a longer array to y:
np.array(['s','','i'])
In addition, I changed the y placeholder to:
y = tf.placeholder(dtype=tf.int32, shape=[None, n_steps])
However, this yields the error:
Rank mismatch: Rank of labels (received 2) should equal rank of logits minus 1 (received 2).
Here's the full code:
embedding_size = 40
n_neurons = 200
n_output = vocab_size
learning_rate = 0.001

with tf.Graph().as_default():
    x = tf.placeholder(dtype=tf.int32, shape=[None, n_steps])
    y = tf.placeholder(dtype=tf.int32, shape=[None])
    seq_length = tf.placeholder(tf.int32, [None])

    # Let's set up the embedding converting words to vectors
    embeddings = tf.Variable(tf.random_uniform(shape=[vocab_size, embedding_size], minval=-1, maxval=1))
    train_input = tf.nn.embedding_lookup(embeddings, x)

    basic_cell = tf.nn.rnn_cell.GRUCell(num_units=n_neurons)
    outputs, states = tf.nn.dynamic_rnn(basic_cell, train_input, sequence_length=seq_length, dtype=tf.float32)

    logits = tf.layers.dense(states, units=vocab_size, activation=None)
    predictions = tf.nn.softmax(logits)

    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y,
        logits=logits)
    loss = tf.reduce_mean(xentropy)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    training_op = optimizer.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for r in range(1000):
            x_batch, y_batch, seq_length_batch = input_fn()
            feed_dict = {x: x_batch, y: y_batch, seq_length: seq_length_batch}
            _, loss_out = sess.run([training_op, loss], feed_dict=feed_dict)
            if r % 1000 == 0:
                print("loss_out", loss_out)

        sample_text = "for th"
        sample_text_ids = np.expand_dims(np.array([w_to_id[c] for c in sample_text] + [0, 0], dtype=np.int32), 0)
        prediction_out = sess.run(predictions, feed_dict={x: sample_text_ids, seq_length: np.array([len(sample_text)])})
        print("Result:", id_to_w[np.argmax(prediction_out)])
In the case of a many-to-many RNN, you should use tf.contrib.seq2seq.sequence_loss to calculate the per-time-step loss. Note that the dense layer must then be applied to the per-step outputs rather than the final state, so that the logits have shape [batch, n_steps, vocab_size]. Your code should look like this:
...
logits = tf.layers.dense(outputs, units=vocab_size, activation=None)  # [batch, n_steps, vocab_size]
weights = tf.sequence_mask(seq_length, n_steps, dtype=tf.float32)     # float mask so padded steps contribute no loss
xentropy = tf.contrib.seq2seq.sequence_loss(logits, y, weights)
...
See here for more details on tf.contrib.seq2seq.sequence_loss.
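If it helps, here is a minimal, self-contained shape check for sequence_loss (a sketch with made-up sizes, using the TF 1.x API):

import numpy as np
import tensorflow as tf

# Batch of 2 sequences, 4 time steps, vocabulary of 5 symbols.
logits = tf.constant(np.random.randn(2, 4, 5), dtype=tf.float32)     # [batch, steps, vocab]
targets = tf.constant([[1, 2, 3, 0], [4, 0, 0, 0]], dtype=tf.int32)  # [batch, steps]
weights = tf.sequence_mask([4, 1], 4, dtype=tf.float32)              # 1.0 for real steps, 0.0 for padding
loss = tf.contrib.seq2seq.sequence_loss(logits, targets, weights)

with tf.Session() as sess:
    print(sess.run(loss))  # scalar: average cross-entropy over the unmasked steps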
I am getting an error when calculating the cost function using softmax. It says that the shapes of my logits and labels do not match:
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[1000,2] labels_size=[1,1000]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](Reshape, Reshape_1)]]
Even if I reshape or transpose, the shapes cannot match, since logits has size [1000,2] and labels has size [1000,1]. How do I tackle this issue?
n_nodes_hl1 = 250
n_nodes_hl2 = 250
n_classes = 2
batch_size = 1000

with open("xdf.pickle", 'rb') as f:
    features = pickle.load(f)
with open("ydf.pickle", 'rb') as f:
    labels = pickle.load(f)

def neural_network_model(data, feature_count):
    hidden_layer_1 = {'weights': tf.Variable(tf.random_normal([feature_count, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_layer_2 = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_layer_1['weights']), hidden_layer_1['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_layer_2['weights']), hidden_layer_2['biases'])
    l2 = tf.nn.relu(l2)
    output = tf.matmul(l2, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x, y, features, labels):
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
    prediction = neural_network_model(x, len(features.columns))
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 1
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for i in range(int(len(X_train) / batch_size)):
                epoch_x = X_train[i * batch_size: min((i + 1) * batch_size, len(X_train))]
                epoch_y = y_train[i * batch_size: min((i + 1) * batch_size, len(y_train))]
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})  # was "i, c = ...", which clobbered the loop index
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy: ', accuracy.eval({x: X_test, y: y_test}))

x = tf.placeholder('float', [None, len(features.columns)])
y = tf.placeholder('float')
train_neural_network(x, y, features, labels)
Since I do not know what data you have, I can only guess: your network has n_classes output neurons (2 in your case), but I assume that your labels are binary (0 or 1). You will either have to reduce the number of output neurons to 1 (this should work since you only have two classes), or convert your labels into one-hot labels ([1,0] for label 0 and [0,1] for label 1).
You could also try using tf.nn.sparse_softmax_cross_entropy_with_logits(), which accepts integer class ids directly; that way you don't have to change the rest of the network.
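For example, switching to the sparse loss could look like this (a sketch, assuming y holds integer class ids of shape [batch, 1] and the rest of the graph stays as above):

# Labels as integer class ids; logits stay [batch, n_classes].
y = tf.placeholder(tf.int32, [None, 1])
cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=prediction,
        labels=tf.squeeze(y, axis=1)))  # shape [batch]

# The accuracy check would then compare tf.argmax(prediction, 1) with the
# squeezed labels instead of tf.argmax(y, 1).
# Alternatively, keep the dense softmax loss but one-hot the labels first:
# y_onehot = tf.one_hot(tf.squeeze(y, axis=1), depth=n_classes)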