Tensorflow with batch size and wrong dimension - tensorflow

Here is my code:
# Model parameters: W and b
# tf.reset_default_graph()
W = tf.get_variable("weight",shape = [784, 10], dtype=tf.float32)
b = tf.get_variable("b", shape=(784,10), dtype= tf.float32)
input_X = tf.placeholder('float32', shape = (None,10))
input_y = tf.placeholder('float32', [784, 10])
logits = W*input_X + b
probas = tf.nn.softmax(logits)
classes = tf.argmax(probas)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = input_y, logits = logits))
step = tf.train.AdamOptimizer(loss)
s.run(tf.global_variables_initializer())
BATCH_SIZE = 512
EPOCHS = 40
# for logging the progress right here in Jupyter (for those who don't have TensorBoard)
simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy")
for epoch in range(EPOCHS):  # we finish an epoch when we've looked at all training samples
    batch_losses = []
    for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE):  # data is already shuffled
        _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE],
                                             input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]})
        # collect batch losses; this is almost free as we need a forward pass for backprop anyway
        batch_losses.append(batch_loss)
    train_loss = np.mean(batch_losses)
    val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh})  # this part is usually small
    train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat}))  # this is slow and usually skipped
    valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))
    simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)
The error is:
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py
in _run(self, handle, fetches, feed_dict, options, run_metadata)
973 'Cannot feed value of shape %r for Tensor %r, '
974 'which has shape %r'
--> 975 % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
976 if not self.graph.is_feedable(subfeed_t):
977 raise ValueError('Tensor %s may not be fed.' % subfeed_t)
ValueError: Cannot feed value of shape (512, 784) for Tensor
'Placeholder_2:0', which has shape '(?, 10)'
I am new to TensorFlow and am learning it through Coursera. Please help me.

The issue is with the shapes of the weights, bias, input_X, and input_y placeholders.
Below is the modified code, which should resolve your issue.
W = tf.get_variable('Weight', shape=(784, 10), dtype=tf.float32)
b = tf.get_variable('bias', shape=(10,), dtype=tf.float32)
input_X = tf.placeholder(shape=(None, 784), dtype=tf.float32)  # None is the batch size, 784 is the input dimension
input_y = tf.placeholder(shape=(None, 10), dtype=tf.float32)  # None is the batch size, 10 is the number of classes
logits = tf.matmul(input_X, W) + b  # matrix multiplication instead of elementwise W*input_X
probas = tf.nn.softmax(logits)
classes = tf.argmax(probas, 1)  # argmax over the class axis
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=input_y))
step = tf.train.RMSPropOptimizer(0.001).minimize(loss)  # the optimizer needs minimize(loss), not the loss as a constructor argument
s.run(tf.global_variables_initializer())
BATCH_SIZE = 512
EPOCHS = 50
simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy")
for epoch in range(EPOCHS):
    batch_losses = []
    for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE):  # data is already shuffled
        _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE],
                                             input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]})
        batch_losses.append(batch_loss)
    train_loss = np.mean(batch_losses)
    val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh})
    train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat}))
    valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))
    simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)

Related

ValueError: Cannot feed value of shape (128, 28, 28) for Tensor 'Placeholder:0', which has shape '(?, 784)'

I am new to TensorFlow and machine learning and am trying out a CNN in TensorFlow with my own input data, but I am getting the error attached below.
The data consists of 28x28 images with 15 labels.
I don't understand the numpy reshape step in this script or the error.
Help is highly appreciated.
import tensorflow as tf
import os
import skimage.data
import numpy as np
import random
def load_data(data_directory):
    directories = [d for d in os.listdir(data_directory)
                   if os.path.isdir(os.path.join(data_directory, d))]
    labels = []
    images = []
    for d in directories:
        label_directory = os.path.join(data_directory, d)
        file_names = [os.path.join(label_directory, f)
                      for f in os.listdir(label_directory)
                      if f.endswith(".jpg")]
        for f in file_names:
            images.append(skimage.data.imread(f))
            labels.append(d)
        print(str(d) + ' Completed')
    return images, labels
ROOT_PATH = "H:\Testing\TrainingData"
train_data_directory = os.path.join(ROOT_PATH, "Training")
test_data_directory = os.path.join(ROOT_PATH, "Testing")
print('Loading Data...')
images, labels = load_data(train_data_directory)
print('Data has been Loaded')
n_classes = 15
training_examples = 10500
test_examples = 4500
batch_size = 128
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def maxpool2d(x):
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
def neural_network_model(x):
    weights = {'W_Conv1': tf.Variable(tf.random_normal([5,5,1,32])),
               'W_Conv2': tf.Variable(tf.random_normal([5,5,32,64])),
               'W_FC': tf.Variable(tf.random_normal([7*7*64, 1024])),
               'Output': tf.Variable(tf.random_normal([1024, n_classes]))}
    biases = {'B_Conv1': tf.Variable(tf.random_normal([32])),
              'B_Conv2': tf.Variable(tf.random_normal([64])),
              'B_FC': tf.Variable(tf.random_normal([1024])),
              'Output': tf.Variable(tf.random_normal([n_classes]))}
    x = tf.reshape(x, shape=[-1,28,28,1])
    conv1 = conv2d(x, weights['W_Conv1'])
    conv1 = maxpool2d(conv1)
    conv2 = conv2d(conv1, weights['W_Conv2'])
    conv2 = maxpool2d(conv2)
    fc = tf.reshape(conv2, [-1, 7*7*64])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_FC']) + biases['B_FC'])
    output = tf.matmul(fc, weights['Output']) + biases['Output']
    return output
def next_batch(num, data, labels):
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    idx = idx[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]
    return np.asarray(data_shuffle), np.asarray(labels_shuffle)
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10
    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(training_examples/batch_size)):
                epoch_x, epoch_y = next_batch(batch_size, images, labels)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: images, y: labels}))
print('Training Neural Network...')
train_neural_network(x)
What am I doing wrong? What needs to be fixed, and how do I fix the shape of the numpy array?
If you look closely, you'll see that you have two x placeholders:
x = tf.placeholder('float', [None, 784]) # global
...
x = tf.reshape(x, shape=[-1,28,28,1]) # in neural_network_model
One of them is in the function scope, hence not visible in train_neural_network, so TensorFlow takes the one with shape [?, 784]. You should get rid of one of them.
Also note that your training data has rank 3, i.e. [batch_size, 28, 28], so it's not directly compatible with either of those placeholders.
To feed it into the first x, use epoch_x.reshape([-1, 784]). For the second placeholder (once you make it visible), use epoch_x.reshape([-1, 28, 28, 1]).
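As a minimal sketch of the first option, inside the training loop (assuming, as in the question, that next_batch returns a [batch, 28, 28] array; the labels feed is unchanged from the question's loop):
# keep only the global placeholder: x = tf.placeholder('float', [None, 784])
epoch_x, epoch_y = next_batch(batch_size, images, labels)
epoch_x = epoch_x.reshape([-1, 784])  # rank 3 [batch, 28, 28] -> rank 2 [batch, 784]
_, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})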

Error: Tensorflow BRNN logits and labels must be same size

I have an error like this:
InvalidArgumentError (see above for traceback): logits and labels must
be same size: logits_size=[10,9] labels_size=[7040,9] [[Node:
SoftmaxCrossEntropyWithLogits =
SoftmaxCrossEntropyWithLogits[T=DT_FLOAT,
_device="/job:localhost/replica:0/task:0/gpu:0"](Reshape, Reshape_1)]]
But I can't find the tensor that causes this error... I think it comes from a size mismatch.
My input size is batch_size * n_steps * n_input,
so it will be 10*704*100, and I want to make the output
batch_size * n_steps * n_classes => it will be 10*704*9, via a bidirectional RNN.
How should I change this code to fix the error?
batch_size means the number of sequences, like this:
data 1 : ABCABCABCAAADDD...
...
data 10 : ABCCCCABCDBBAA...
And
n_steps means the length of each sequence (the sequences were padded with 'O' to a fixed length): 704
And
n_input means how each letter in each sequence is encoded, like this:
A - [1, 2, 1, -1, ..., -1]
And the output of the learning should look like this:
output of data 1 : XYZYXYZYYXY ...
...
output of data 10 : ZXYYRZYZZ ...
Each output letter is affected by the surrounding letters and the order of the input sequence.
learning_rate = 0.001
training_iters = 100000
batch_size = 10
display_step = 10
# Network Parameters
n_input = 100
n_steps = 704 # timesteps
n_hidden = 50 # hidden layer num of features
n_classes = 9
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_steps, n_classes])
weights = {
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
    x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # Old TensorFlow versions only return outputs, not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = BiRNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    while step * batch_size < training_iters:
        batch_x, batch_y = next_batch(batch_size, r_big_d, y_r_big_d)
        #batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    test_x, test_y = next_batch(batch_size, v_big_d, y_v_big_d)
    print("Testing Accuracy:", \
          sess.run(accuracy, feed_dict={x: test_x, y: test_y}))
The first return value of static_bidirectional_rnn is a list of tensors, one for each RNN step. By using only the last one in your tf.matmul you're losing all the rest. Instead, stack them into a single tensor of the appropriate shape, reshape for the matmul, then reshape back.
outputs = tf.stack(outputs, axis=1)                              # [batch_size, n_steps, 2*n_hidden]
outputs = tf.reshape(outputs, (batch_size*n_steps, 2*n_hidden))  # bidirectional outputs are 2*n_hidden wide (fw + bw concatenated)
outputs = tf.matmul(outputs, weights['out']) + biases['out']
outputs = tf.reshape(outputs, (batch_size, n_steps, n_classes))
Alternatively, you could use tf.einsum:
outputs = tf.stack(outputs, axis=1)
outputs = tf.einsum('ijk,kl->ijl', outputs, weights['out']) + biases['out']
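For what it's worth, the einsum form also keeps the batch dimension symbolic, whereas the reshape version above bakes the Python integers batch_size and n_steps into the graph.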

Shapes must be equal rank, but are 2 and 1

I'm following Sentdex's example on YouTube, and here is the code I have:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
def neural_network_model(data):
    hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases':tf.Variable(tf.random_normal([n_classes])),}
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output
def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
    # NEW:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10
    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples/batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
train_neural_network(x)
It raises this error :
ValueError: Shapes must be equal rank, but are 2 and 1
From merging shape 0 with other shapes. for 'SparseSoftmaxCrossEntropyWithLogits/packed' (op: 'Pack') with input shapes: [?,10], [10].
on this line:
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
I think it is the shape of y that induced the error. I tried using
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    prediction, tf.squeeze(y)))
I'm pretty sure this means that the cost function induces the error (shown above): prediction and y are not the same shape, but I don't understand TensorFlow well enough to know how to fix it. I don't even really understand where y is being set; I got most of this code from a tutorial and fiddled with it to apply it to a different dataset. How can I fix this error?
P.S. I tried to print out prediction; it gives me two outputs, and I guess that's where the error comes from:
prediction
(<tf.Tensor 'MatMul_39:0' shape=(?, 10) dtype=float32>,
<tf.Variable 'Variable_79:0' shape=(10,) dtype=float32_ref>)
Since you are using one_hot=True while reading the input data, just define the correct shape for the y placeholder:
# redefine the label and input with exact data type and shape
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, shape=[None, n_classes])
You have a comma between the closing parenthesis and the dict's closing brace in this statement:
output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),'biases':tf.Variable(tf.random_normal([n_classes])),}
just before the closing brace:
...([n_classes])),}
# WORKING CODE
# I had the same problem as you (not counting the comma), and I'm sorry I don't remember the things I changed, but hopefully this will work
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# 10 classes, 0-9
n_nodes_hl1=500
n_nodes_hl2=500
n_nodes_hl3=500
n_classes=10
batch_size=100
x=tf.placeholder('float',[None,784])
y=tf.placeholder('float')
def neural(data):
    hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases':tf.Variable(tf.random_normal([n_classes]))}
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)  # was `li = tf.nn.relu(l1)`, a typo that left l1 without the ReLU
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output
def train(x):
    prediction = neural(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 20
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples/batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
train(x)

Illegal argument error: logits and labels must be same size

I am getting an error when calculating the cost function using softmax. It says that the shapes of my logits and labels do not match:
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[1000,2] labels_size=[1,1000]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](Reshape, Reshape_1)]]
Even if I reshape or transpose, the shapes cannot match, since logits is [1000, 2] and labels is [1000, 1]. How do I tackle this issue?
n_nodes_hl1 = 250
n_nodes_hl2 = 250
n_classes = 2
batch_size = 1000
with open("xdf.pickle", 'rb') as f:
features = pickle.load(f)
with open("ydf.pickle", 'rb') as f:
labels = pickle.load(f)
def neural_network_model(data, feature_count):
    hidden_layer_1 = {'weights': tf.Variable(tf.random_normal([feature_count, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_layer_2 = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}
    l1 = tf.add(tf.matmul(data, hidden_layer_1['weights']), hidden_layer_1['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_layer_2['weights']), hidden_layer_2['biases'])
    l2 = tf.nn.relu(l2)
    output = tf.matmul(l2, output_layer['weights']) + output_layer['biases']
    return output
def train_neural_network(x, y, features, labels):
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
    prediction = neural_network_model(x, len(features.columns))
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 1
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for i in range(int(len(X_train) / batch_size)):
                epoch_x = X_train[i*batch_size: min((i + 1)*batch_size, len(X_train))]
                epoch_y = y_train[i*batch_size: min((i + 1)*batch_size, len(y_train))]
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})  # was `i, c = ...`, which clobbered the loop variable
                epoch_loss += c
            print('Epoch', epoch, ' completed out of ', hm_epochs, ' loss: ', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy: ', accuracy.eval({x: X_test, y: y_test}))
x = tf.placeholder('float', [None, len(features.columns)])
y = tf.placeholder('float')
train_neural_network(x, y, features, labels)
Since I do not know what data you have, I can only guess: your network has n_classes output neurons (2 in your case), but I assume that your labels are binary (0 or 1). You will either have to reduce the number of output neurons to 1 (this should work since you only have two classes), or convert your labels into one-hot labels ([1, 0] for a label 0 and [0, 1] for a label 1).
You could also try using tf.nn.sparse_softmax_cross_entropy_with_logits(); maybe it will work, so that you don't have to change the rest of the network. A sketch of both options follows.
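As a minimal sketch of both options (assuming numpy is imported as np and your pickled labels arrive as a column of 0/1 integers; the names labels_flat and labels_oh are mine, not from the question):
# Option 1: turn binary integer labels into one-hot rows for softmax_cross_entropy_with_logits
labels_flat = np.asarray(labels).reshape(-1).astype(int)  # e.g. [1000, 1] -> [1000]
labels_oh = np.eye(n_classes)[labels_flat]                # [1000, 2]: 0 -> [1, 0], 1 -> [0, 1]
# Option 2: keep integer class indices and switch to the sparse loss instead
y = tf.placeholder(tf.int64, [None])                      # shape [batch], not [batch, n_classes]
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=prediction, labels=y))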

How to use a TensorFlow RNN to learn one-dimensional data? AttributeError: 'numpy.ndarray' object has no attribute 'batch'

The one-dimensional data consists of 80 samples, each of length 1089. I want to use 70 samples for training and 10 for testing.
I am a total beginner in Python and TensorFlow, so I am reusing code that processes images (which are two-dimensional). Here is the code I use (all the parameters are set low because I just want to test the code):
import tensorflow as tf
import scipy.io as sc
from tensorflow.python.ops import rnn, rnn_cell
# data read
feature_training = sc.loadmat("feature_training.mat")
feature_training = feature_training['feature_training']
print (feature_training.shape)
feature_testing = sc.loadmat("feature_testing.mat")
feature_testing = feature_testing['feature_testing']
print (feature_testing.shape)
label_training = sc.loadmat("label_training.mat")
label_training = label_training['label_training']
print (label_training.shape)
label_testing = sc.loadmat("label_testing.mat")
label_testing = label_testing['label_testing']
print (label_testing.shape)
# parameters
learning_rate = 0.1
training_iters = 100
batch_size = 70
display_step = 10
# network parameters
n_input = 70 # MNIST data input (img shape: 28*28)
n_steps = 100 # timesteps
n_hidden = 10 # hidden layer num of features
n_classes = 2 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases):
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = feature_training.next_batch(batch_size)
        # Reshape data to get 28 seq of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            # loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Calculate accuracy for 10 testing data
    test_len = 10
    test_data = feature_testing[:test_len].reshape((-1, n_steps, n_input))
    test_label = label_testing[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
Finally, it produces this error:
Traceback (most recent call last):
File "/home/xiangzhang/MNIST data test.py", line 92, in <module>
batch_x, batch_y = feature_training.batch(batch_size)
AttributeError: 'numpy.ndarray' object has no attribute 'next_batch'
I thought it must be related to the dimensions of the data, but I do not know how to fix it. Please help me, thank you very much.
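(As a minimal sketch of one way past this particular AttributeError: next_batch is a method of TensorFlow's MNIST DataSet helper, not of a plain numpy array, so a small slicing helper has to stand in for it. The function below is an assumption modeled on the next_batch helper from an earlier question above, not part of the original code.)
import numpy as np
def next_batch(num, data, labels):
    # sample `num` random rows from the numpy arrays, like DataSet.next_batch does
    idx = np.random.choice(len(data), num, replace=False)
    return data[idx], labels[idx]
Then batch_x, batch_y = next_batch(batch_size, feature_training, label_training) replaces the failing feature_training.next_batch(batch_size) call.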