InvalidArgumentError while coding MNIST tutorial - tensorflow

These are my first TensorFlow steps, and I would like to know whether somebody else has had the same issue as me and whether there is a way around it.
I am coding the MNIST tutorial and my current code snippet is:
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# placeholder for input
x = tf.placeholder(tf.float32, [None, 784])  # None means a dimension can be of any length
# weights for the model: 784 pixels map to ten results
W = tf.Variable(tf.zeros([784, 10]))
# bias
b = tf.Variable(tf.zeros([10]))
# implementing the model
y = tf.matmul(x, W) + b
# placeholder for the true labels
y_ = tf.placeholder(tf.float32, [None, 10])
# implementing cross-entropy
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
for _ in range(1000):
    batch_xs, batch_xy64 = mnist.train.next_batch(100)
    batch_xy = batch_xy64.astype(np.float32)
    sess.run(train_step, feed_dict={x: batch_xs, y: batch_xy})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
First I tried the cross_entropy from the MNIST description and then the one from the provided source code, which made no difference.
Note that I explicitly try to cast batch_xy, as it is returned as float64.
This also seems to be the problem, as session.run appears to expect float32 tensors and variables.
As far as I could see while debugging, the labels in the MNIST data are returned as float64 - perhaps that explains my error:
...
File "/home/braunalx/python-workspace/LearnTensorFlow/firstSteps/MNIST_Start.py", line 40, in mnist_run
y_ = tf.placeholder(tf.float32,[None,10])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 1548, in placeholder
return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
...
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_1' with dtype float and shape [?,10]
[[Node: Placeholder_1 = Placeholder[dtype=DT_FLOAT, shape=[?,10], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Is there any issue with the provided MNIST data?

The error says that you didn't feed a value for a placeholder that the run depends on: the label placeholder y_ ('Placeholder_1'). In your training loop you feed the model output y instead of y_. Replace y with y_ in this line: sess.run(train_step, feed_dict={x: batch_xs, y: batch_xy}).
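For reference, a minimal sketch of the corrected training loop (the explicit .astype(np.float32) cast is likely unnecessary, since feed_dict converts NumPy arrays to the placeholder's dtype):
for _ in range(1000):
    batch_xs, batch_xy = mnist.train.next_batch(100)
    # feed the labels into the label placeholder y_, not into the model output y
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_xy})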

Related

You must feed a value for placeholder tensor 'Placeholder_2' with dtype float and shape [?,10]

I don't know why this problem occurs; I have checked many times, and I do feed xs and ys through feed_dict. So what is the reason for this problem, and how do I modify my code to fix the error? Below is the error log.
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_2' with dtype float and shape [?,10]
[[node Placeholder_2 (defined at /home/jiayu/dropout.py:41) = Placeholder[dtype=DT_FLOAT, shape=[?,10], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
[[{{node Mean_5/_55}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_271_Mean_5", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
This code runs on Ubuntu 16.04, TensorFlow 1.12.0 and Python 3.6.8.
from __future__ import print_function
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

# load data
digits = load_digits()
X = digits.data
y = digits.target
y = LabelBinarizer().fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3)

def add_layer(inputs, in_size, out_size, layer_name, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # here to dropout
    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs

# define placeholders for inputs to network
keep_prob = tf.placeholder(tf.float32)
xs = tf.placeholder(tf.float32, [None, 64])  # 8x8
ys = tf.placeholder(tf.float32, [None, 10])
# add output layer
l1 = add_layer(xs, 64, 50, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax)
# the loss between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))  # loss
tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.Session()
merged = tf.summary.merge_all()
# summary writers go in here
train_writer = tf.summary.FileWriter("logs/train", sess.graph)
test_writer = tf.summary.FileWriter("logs/test", sess.graph)
# tf.initialize_all_variables() is no longer valid since
# 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)
for i in range(500):
    # here to determine the keeping probability
    sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
    if i % 50 == 0:
        # record loss
        train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
        test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})
        train_writer.add_summary(train_result, i)
        test_writer.add_summary(test_result, i)
The expected result is that the loss scalar is displayed in TensorBoard.

You cannot run the script more than once in the same process, because otherwise you keep adding nodes to the same default graph.
The first run will work without any errors. But when you run the script again, a second copy of the graph's nodes is added to the same default graph. You can see this behaviour in TensorBoard: after several runs the computation graph gets bigger and bigger, and when you try to evaluate the bigger graph, the extra placeholders simply don't get data fed to them and raise this error.
Here is the simple solution: use tf.reset_default_graph() and put it before the place where you create the graph.
tf.reset_default_graph()
# define placeholder for inputs to network
keep_prob = tf.placeholder(tf.float32, name='prob')
xs = tf.placeholder(tf.float32, [None, 64], name='x_input') # 8x8
ys = tf.placeholder(tf.float32, [None, 10], name='y_input')
...
Some further reading: Remove nodes from graph or reset entire default graph.
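An alternative that avoids having to reset the default graph is to build everything inside an explicit tf.Graph, so every run starts from an empty graph. A minimal sketch, with the layer/loss construction elided and assumed unchanged:
graph = tf.Graph()
with graph.as_default():
    keep_prob = tf.placeholder(tf.float32, name='prob')
    xs = tf.placeholder(tf.float32, [None, 64], name='x_input')  # 8x8
    ys = tf.placeholder(tf.float32, [None, 10], name='y_input')
    # ... add_layer calls, loss and train_step exactly as before ...
    init = tf.global_variables_initializer()

with tf.Session(graph=graph) as sess:
    sess.run(init)
    # ... training loop as before ...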

I can't test a trained model in TensorFlow

I have a DNC model built in TensorFlow. After training, I want to test it against test data. I have tried everything, but it seems that TensorFlow always requires the training data to be fed into the graph.
with tf.Session(graph=graph) as sess:
    # initialize input output pairs
    tf.initialize_all_variables().run()
    final_i_data = X_train
    final_o_data = y_train
    # for each iteration
    for i in range(0, iterations + 1):
        # feed in each input output pair
        feed_dict = {dnc.i_data: final_i_data, dnc.o_data: final_o_data}
        # make predictions
        l, _, predictions = sess.run([loss, optimizer, output], feed_dict=feed_dict)
        if i % 100 == 0:
            print(i, l)
    for x in X_test:
        x = np.reshape(x, (1, 24))
        feed_dict = {dnc.tf_test_dataset: x}
        predictions = sess.run(test_output, feed_dict=feed_dict)
        print(predictions)
I get this error every time:
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [6,24]
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[6,24], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
In my graph, tf_test_dataset is a placeholder of shape (1, 24), but the error asks me to feed the training-data placeholder. PLEASE HELP!
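One way to narrow this down is to check which placeholders test_output actually depends on: if the [6, 24] training placeholder shows up in the list, then test_output (or something it depends on) was built from the training input rather than from dnc.tf_test_dataset, and the graph wiring needs to be fixed there. A small diagnostic sketch (placeholders_feeding is a hypothetical helper, not part of the DNC code):
def placeholders_feeding(tensor):
    # walk the graph backwards from `tensor` and collect every Placeholder op it depends on
    seen, stack, found = set(), [tensor.op], []
    while stack:
        op = stack.pop()
        if op in seen:
            continue
        seen.add(op)
        if op.type == 'Placeholder':
            found.append(op.name)
        stack.extend(inp.op for inp in op.inputs)
    return found

print(placeholders_feeding(test_output))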

How can I test the Deep MNIST for Experts code on a single image?

I am just starting out with TensorFlow and I want to test the trained model from TensorFlow's tutorial on my own images. This is the code I used to test the softmax regression model from the beginning of the tutorial on my own images:
with open("three.jpeg", "rb") as f:
contents = f.read()
image = tf.image.decode_jpeg(contents, channels=1)
image_float = tf.image.convert_image_dtype(image, tf.float32)
resized_image = tf.image.resize_images(image_float, [28, 28])
resized_image = tf.reshape(resized_image, [784])
img = 1 - resized_image.eval()
classification = sess.run(tf.argmax(y, 1), feed_dict={x: [img]})
plt.imshow(img.reshape(28, 28), cmap=plt.cm.binary)
plt.show()
print ('NN predicted', classification[0])
This worked fine for the softmax regression model but not for the multilayer convolutional network. I tried changing y in this line
classification = sess.run(tf.argmax(y, 1), feed_dict={x: [img]})
to y_conv but it gave me this error:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_2' with dtype float
[[Node: Placeholder_2 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
There is a placeholder somewhere in your graph which you are not feeding. Odds are you need to feed another placeholder in your feed_dict for the convolutional network; in the Deep MNIST for Experts tutorial that is typically the dropout placeholder keep_prob.
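A minimal sketch of the adjusted call, assuming the graph was built as in the Deep MNIST for Experts tutorial (which defines keep_prob = tf.placeholder(tf.float32) for dropout):
# disable dropout at inference time by feeding keep_prob = 1.0
classification = sess.run(tf.argmax(y_conv, 1),
                          feed_dict={x: [img], keep_prob: 1.0})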

Tensorflow reshaping error for 1d convolutional neural net

I am building a 1d convolutional neural net for my own data (spectra) and am having an issue with tf.reshape. First I load the data with pandas and convert it to numpy arrays, composed of 708 training example spectra, each of length 2151,
import pandas as pd
import numpy as np
data = pd.read_csv('test.csv',header=None)
yTrue = data.ix[:,0].as_matrix()
data = data - data.mean()
data = data.ix[:,1:].as_matrix()
where I subtract the mean value in each column. So data is of dimensions 708 x 2151 here. I then create a network that starts with,
sess = tf.InteractiveSession()
## define inputs
x_ = tf.placeholder(tf.float32, shape=[None, 2151])
x_ = tf.reshape(x_, [-1,1,2151,1])
y_ = tf.placeholder(tf.float32, shape=[None])
which are inputs for my 1d convolutional neural net (with kernels with a width of 10, and 32 feature maps),
W_conv1 = weight_variable([1, 10, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
I then build the rest of the network and then try to run ADAM on it,
cost_function = tf.reduce_mean(tf.pow(y_out - y_, 2))/(2 * samples_number) #L2 loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cost_function)
correct_prediction = tf.equal(tf.argmax(y_out,1), tf.argmax(y_,1))
sess.run(tf.initialize_all_variables())
for i in range(20000):
    print(i)
    sess.run(train_step, feed_dict={x_: data, y_: yTrue})
However I get the following error:
ValueError: Cannot feed value of shape (708, 2151) for Tensor u'Reshape_26:0',
which has shape '(?, 1, 2151, 1)'
I have looked at these answers: TensorFlow/TFLearn: ValueError: Cannot feed value of shape (64,) for Tensor u'target/Y:0', which has shape '(?, 10)'; Tensorflow error using my own data. They suggest that I need to do some reshaping before I pass my data to the network, but I am not sure what this should be, particularly since the following works on the first row of the data,
t = tf.constant(data[0])
tf.reshape(t,[1,1,2151,1])
Does anyone have any ideas here?
Best,
Ben
The issue is that feed_dict can replace any Tensor, and since you've changed x_ to reference the reshape op, that's the thing that it's trying to replace. It should work if you just use different Python variables to reference the placeholder and the reshape op:
x_placeholder_ = tf.placeholder(tf.float32, shape=[None, 2151])
x_ = tf.reshape(x_placeholder_, [-1,1,2151,1])
Then when feeding, use x_placeholder_:
sess.run(train_step, feed_dict={x_placeholder_: data, y_: yTrue})

Cannot make array when using TensorFlow

When feeding batch_xs to x, I reshaped batch_xs, since BATCH_SIZE is 1.
Here is my source.
I'm not sure what is causing the ValueError.
with tf.name_scope("input") as scope:
x = tf.placeholder(tf.float32, shape=[1, 784])
BATCH_SIZE = 1
DROP_OUT_RATE = 0.4
EPOCH = 1
MEMORIZE = 10
accuracy_array = []
loss = tf.nn.l2_loss(y - x) / BATCH_SIZE
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
mnist_list = make_mnist_train_list(55000, 10)
test_list = make_mnist_test_list(5000, 10)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
for i in range(EPOCH):
for j in range(5500/BATCH_SIZE):
batch_xs = tf.reshape(mnist_list[0][j*BATCH_SIZE:j*BATCH_SIZE+1], [1, 784])
sess.run(train_step, feed_dict={x: batch_xs, keep_prob: (1.0 - DROP_OUT_RATE), r_keep_prob: (1.0 - DROP_OUT_RATE)})
if (i +1)% MEMORIZE == 0:
accuracy_array.append(loss.eval(session=sess, feed_dict={x: batch_xs, keep_prob: 1.0, r_keep_prob: 1.0}))
print(accuracy_array[ int(math.floor((i+1)/MEMORIZE -1))])
This gives me a ValueError, which doesn't make sense to me:
ValueError: Argument must be a dense tensor
From the documentation here:
Each key in feed_dict can be one of the following types:
If the key is a Tensor, the value may be a Python scalar, string, list, or numpy ndarray that can be converted to the same dtype as that tensor. Additionally, if the key is a placeholder, the shape of the value will be checked for compatibility with the placeholder.
If the key is a SparseTensor, the value should be a SparseTensorValue.
The types that you can use as the "value" for a key in feed_dict should be Python primitive types or numpy arrays. You are using the result of tf.reshape, which is a TensorFlow Tensor type. You can simply use np.reshape if you want to feed a reshaped array.
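For example, a minimal sketch of the fix inside the training loop, assuming mnist_list[0] is a NumPy array (the make_mnist_train_list helper is not shown):
import numpy as np

# reshape on the Python side with NumPy, so the value fed to the placeholder
# is a plain ndarray of shape (1, 784) rather than a tf.reshape Tensor
batch_xs = np.reshape(mnist_list[0][j*BATCH_SIZE:j*BATCH_SIZE+1], (1, 784))
sess.run(train_step, feed_dict={x: batch_xs,
                                keep_prob: (1.0 - DROP_OUT_RATE),
                                r_keep_prob: (1.0 - DROP_OUT_RATE)})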