Tensorflow error in feed_dict - tensorflow

I have the following error.
Cannot feed value of shape (525, 3) for Tensor 'Placeholder_31:0', which has shape '(?, 2)'
Here is my code:
data=pd.read_csv('/Users/kselvan/Desktop/cancer.csv',names=["A","B","C","D","E","F","G","H","I","J","K"])
s=np.asarray([1,0,0])
ve=np.asarray([0,1,0])
data['K'] = data['K'].map({2: s, 4: ve})
data=data.iloc[np.random.permutation(len(data))]
data=data.reset_index(drop=True)
#training data
x_input=data.loc[0:524,["A","B","C","D","E","F","G","H","I","J"]]
temp=data['K']
y_input=temp[0:525]
#test data
x_test=data.loc[525:698,["A","B","C","D","E","F","G","H","I","J"]]
y_test=temp[525:699]
#placeholders and variables. input has 10 features and output has 3 classes
x=tf.placeholder(tf.float32,shape=[None,10])
y_=tf.placeholder(tf.float32,shape=[None,2])
print(x.shape)
print(y.shape)
#weight and bias
W=tf.Variable(tf.zeros([10,2]))
b=tf.Variable(tf.zeros([2]))
# model
#softmax function for multiclass classification
y = tf.nn.softmax(tf.matmul(x, W) + b)
#loss function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
#optimiser -
train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy)
#calculating accuracy of our model
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#session parameters
sess = tf.InteractiveSession()
#initialising variables
init = tf.global_variables_initializer()
sess.run(init)
#number of iterations
epoch=2000
for step in range(epoch):
    _, c = sess.run([train_step, cross_entropy], feed_dict={x: x_input, y_: [t for t in y_input.as_matrix()]})
    if step % 500 == 0:
        print(c)
As I am new to TensorFlow, I can't figure out what the mistake is. Can anyone help me sort it out?

You are declaring the placeholder with shape (?, 2) in the following line:
y_=tf.placeholder(tf.float32,shape=[None,2])
However, your problem is classification with 3 classes (the labels s and ve are 3-element one-hot vectors), so you should change your y_, W and b to the following:
y_=tf.placeholder(tf.float32,shape=[None,3])
W=tf.Variable(tf.zeros([10,3]))
b=tf.Variable(tf.zeros([3]))
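With those shapes, the (525, 3) label matrix from the question fits the placeholder. As a minimal sketch of the feed step (assuming y_input is the pandas Series of 3-element arrays built in the question), the labels can be stacked into a plain matrix before feeding:

import numpy as np

# Stack the Series of 3-element one-hot arrays into a (525, 3) matrix;
# this plays the same role as the list comprehension used in the question.
y_matrix = np.stack(y_input.values)

_, c = sess.run([train_step, cross_entropy],
                feed_dict={x: x_input, y_: y_matrix})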

Actually, you defined the wrong shape for your weights and bias, so change their dimensions according to your network architecture. It should be like this:
y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])
w = tf.Variable(tf.zeros([input_tensor_shape, number_of_classes]))
b = tf.Variable(tf.zeros([number_of_classes]))
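For the network in the question, those generic shapes work out roughly as follows (a sketch only: 10 feature columns A..J and 3-element one-hot labels, as in the question):

number_of_classes = 3     # labels s and ve are 3-element one-hot vectors
input_tensor_shape = 10   # feature columns A..J

y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])
W = tf.Variable(tf.zeros([input_tensor_shape, number_of_classes]))
b = tf.Variable(tf.zeros([number_of_classes]))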

Related

In tensorflow 1, when the loss function is defined with operations on Tensors, is the model really trained?

First, I'm sorry, but it's not possible to reproduce this problem in a few lines, as the model involved is a very complex network.
But here is an idea of the code:
def return_iterator(data, nb_epochs, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices(data)
    dataset = dataset.repeat(nb_epochs).batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    yy = iterator.get_next()
    return tf.cast(yy, tf.float32)

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    y_pred = complex_model.autoencode(train)
    y_pred = tf.convert_to_tensor(y_pred, dtype=tf.float32)

    nb_epochs = 10
    batch_size = 64

    y_real = return_iterator(train, nb_epochs, batch_size)
    y_pred = return_iterator(y_pred, nb_epochs, batch_size)

    res_equal = 1. - tf.reduce_mean(tf.abs(y_pred - y_real), [1, 2, 3])
    loss = 1 - tf.reduce_sum(res_equal, axis=0)

    opt = tf.train.AdamOptimizer().minimize(loss)
    tf.global_variables_initializer().run()

    for epoch in range(0, nb_epochs):
        _, d_loss = sess.run([opt, loss])
To define the loss, I must use operations like tf.reduce_mean and tf.reduce_sum, and these operations only accept Tensors as input.
My question is: with this code, will the complex_model autoencoder be trained during training? (Even though here it is only used to output the predictions needed to compute the loss.)
Thank you
p.s: I am using TF1.15 (and I cannot use another version)
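One way to check this in TF 1.15 is to ask the graph directly whether the autoencoder's variables receive a gradient from the loss. This is only a diagnostic sketch and assumes the autoencoder's weights show up in tf.trainable_variables():

# Variables whose gradient is None are disconnected from the loss
# and will not be updated by the AdamOptimizer step.
train_vars = tf.trainable_variables()
grads = tf.gradients(loss, train_vars)
for var, grad in zip(train_vars, grads):
    status = "no gradient (not trained)" if grad is None else "receives a gradient"
    print(var.name, status)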

tensorflow - linear regression does not give intended computational graph

I am trying to train a very simple linear regression with TensorFlow, but the loss doesn't decrease and the TensorBoard graph also doesn't look right.
### Generate data
w_true = np.array([1.0,2.0])
b_true = 0.5
x_train = np.random.multivariate_normal(mean=[0,0], cov=[[1,0],[0,1]], size=100)
x_test = np.random.multivariate_normal(mean=[0,0], cov=[[3,0],[0,3]], size=100)
y_train = np.dot(x_train,w_true) + b_true
y_test = np.dot(x_test,w_true) + b_true
### Placeholders for data input
x = tf.placeholder(dtype=tf.float32, shape=[None,2], name="x")
y = tf.placeholder(dtype=tf.float32, shape=[None], name="labels")
### Trainable parameters
w = tf.Variable(initial_value=np.random.multivariate_normal([0,0],[[1,0],[0,1]]), dtype=tf.float32, name="W")
b = tf.Variable(initial_value=np.random.normal(1), dtype=tf.float32,name="B")
### Computational graph
y_pred = tf.tensordot(x,w,1)+b
tf.summary.histogram("weights",w)
tf.summary.histogram("bias",b)
loss = tf.reduce_sum(tf.squared_difference(y,y_pred), name="loss")
tf.summary.scalar("loss", loss)
with tf.name_scope("train"):
    train_step = tf.train.GradientDescentOptimizer(0.00001).minimize(loss)
### Training
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# For TensorBoard
writer = tf.summary.FileWriter("path_to_some_folder")
writer.add_graph(sess.graph)
for t in range(1000):
    x_batch = x_train[np.random.choice(100, 20)]
    y_batch = y_train[np.random.choice(100, 20)]
    sess.run(train_step, {x: x_batch, y: y_batch})

print(sess.run(loss, {x: x_train, y: y_train}))
print(sess.run(loss, {x: x_test, y: y_test}))
I have tried different step sizes, but the error always stays above 400 on the training set and 1000 on the test set. I have tested that tf.tensordot() behaves as I expect. If you would like to see the TensorBoard graph, just replace path_to_some_folder and, after training, run tensorboard --logdir path_to_some_folder
Thanks very much for the help
Your problem is caused by the following two lines:
x_batch = x_train[np.random.choice(100, 20)]
y_batch = y_train[np.random.choice(100, 20)]
In each iteration, np.random.choice(100, 20) returns two different index lists for x_batch and y_batch. Therefore, your x_batch and y_batch will never match. Instead, replace that part with the following code.
BATCH_SIZE = 10
N_COUNT = len(x_train)

for t in range(1000):
    for start, end in zip(range(0, N_COUNT, BATCH_SIZE),
                          range(BATCH_SIZE, N_COUNT + 1, BATCH_SIZE)):
        x_batch = x_train[start:end]
        y_batch = y_train[start:end]
        sess.run(train_step, {x: x_batch, y: y_batch})
Hope this helps.
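If you would rather keep random mini-batches, a minimal alternative (a sketch along the same lines) is to draw a single index array per step and use it for both tensors so the inputs and labels stay aligned:

for t in range(1000):
    idx = np.random.choice(100, 20, replace=False)  # one shared index set
    x_batch = x_train[idx]
    y_batch = y_train[idx]
    sess.run(train_step, {x: x_batch, y: y_batch})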

Siamese Model with LSTM network fails to train using tensorflow

Dataset Description
The dataset contains a set of question pairs and a label which tells whether the two questions are the same. For example:
"How do I read and find my YouTube comments?" , "How can I see all my
Youtube comments?" , "1"
The goal of the model is to identify whether the given question pair is the same or different.
Approach
I have created a Siamese network to identify whether two questions are the same. Following is the model:
graph = tf.Graph()
with graph.as_default():
    embedding_placeholder = tf.placeholder(tf.float32, shape=embedding_matrix.shape, name='embedding_placeholder')
    with tf.variable_scope('siamese_network') as scope:
        labels = tf.placeholder(tf.int32, [batch_size, None], name='labels')
        keep_prob = tf.placeholder(tf.float32, name='question1_keep_prob')
        with tf.name_scope('question1') as question1_scope:
            question1_inputs = tf.placeholder(tf.int32, [batch_size, seq_len], name='question1_inputs')
            question1_embedding = tf.get_variable(name='embedding', initializer=embedding_placeholder, trainable=False)
            question1_embed = tf.nn.embedding_lookup(question1_embedding, question1_inputs)
            question1_lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
            question1_drop = tf.contrib.rnn.DropoutWrapper(question1_lstm, output_keep_prob=keep_prob)
            question1_multi_lstm = tf.contrib.rnn.MultiRNNCell([question1_drop] * lstm_layers)
            q1_initial_state = question1_multi_lstm.zero_state(batch_size, tf.float32)
            question1_outputs, question1_final_state = tf.nn.dynamic_rnn(question1_multi_lstm, question1_embed, initial_state=q1_initial_state)
        scope.reuse_variables()
        with tf.name_scope('question2') as question2_scope:
            question2_inputs = tf.placeholder(tf.int32, [batch_size, seq_len], name='question2_inputs')
            question2_embedding = question1_embedding
            question2_embed = tf.nn.embedding_lookup(question2_embedding, question2_inputs)
            question2_lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
            question2_drop = tf.contrib.rnn.DropoutWrapper(question2_lstm, output_keep_prob=keep_prob)
            question2_multi_lstm = tf.contrib.rnn.MultiRNNCell([question2_drop] * lstm_layers)
            q2_initial_state = question2_multi_lstm.zero_state(batch_size, tf.float32)
            question2_outputs, question2_final_state = tf.nn.dynamic_rnn(question2_multi_lstm, question2_embed, initial_state=q2_initial_state)
Calculate the distance between the RNN outputs and define the loss:
with graph.as_default():
    diff = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(question1_outputs[:, -1, :], question2_outputs[:, -1, :])), reduction_indices=1))
    margin = tf.constant(1.)
    labels = tf.to_float(labels)
    match_loss = tf.expand_dims(tf.square(diff, 'match_term'), 0)
    mismatch_loss = tf.expand_dims(tf.maximum(0., tf.subtract(margin, tf.square(diff)), 'mismatch_term'), 0)
    loss = tf.add(tf.matmul(labels, match_loss), tf.matmul((1 - labels), mismatch_loss), 'loss_add')
    distance = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(distance)
Following is the code to train the model:
with graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer(), feed_dict={embedding_placeholder: embedding_matrix})

    iteration = 1
    for e in range(epochs):
        summary_writer = tf.summary.FileWriter('/Users/mithun/projects/kaggle/quora_question_pairs/logs', sess.graph)
        summary_writer.add_graph(sess.graph)

        for ii, (x1, x2, y) in enumerate(get_batches(question1_train, question2_train, label_train, batch_size), 1):
            feed = {question1_inputs: x1,
                    question2_inputs: x2,
                    labels: y[:, None],
                    keep_prob: 0.9
                    }
            loss1 = sess.run([distance], feed_dict=feed)

            if iteration % 5 == 0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss1))

            if iteration % 50 == 0:
                val_acc = []
                for x1, x2, y in get_batches(question1_val, question2_val, label_val, batch_size):
                    feed = {question1_inputs: x1,
                            question2_inputs: x2,
                            labels: y[:, None],
                            keep_prob: 1
                            }
                    batch_acc = sess.run([accuracy], feed_dict=feed)
                    val_acc.append(batch_acc)
                print("Val acc: {:.3f}".format(np.mean(val_acc)))

            iteration += 1

    saver.save(sess, "checkpoints/quora_pairs.ckpt")
I have trained the above model with about 10,000 labeled examples. But the accuracy is stagnant at around 0.630 and, strangely, the validation accuracy is the same across all the iterations.
lstm_size = 64
lstm_layers = 1
batch_size = 128
learning_rate = 0.001
Is there anything wrong with the way I have created the model?
This is a common problem with imbalanced datasets like the recently released Quora dataset you are using. Since the Quora dataset is imbalanced (~63% negative and ~37% positive examples), you need proper initialization of the weights. Without proper weight initialization your solution will be stuck in a local minimum and will learn to predict only the negative class. Hence the 63% accuracy, because that is the percentage of 'not similar' questions in your validation data. If you check the results obtained on your validation set, you will notice that it predicts all zeros. A truncated normal distribution, as proposed in He et al. (http://arxiv.org/abs/1502.01852), is a good alternative for initializing the weights.
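For reference, a minimal sketch of that kind of initialization in TF 1.x; the variable names and output size here are illustrative, not taken from the question's code:

import numpy as np
import tensorflow as tf

# He et al. (2015) scaling: stddev = sqrt(2 / fan_in), drawn from a
# truncated normal instead of starting from zeros.
fan_in = 64                                   # e.g. lstm_size
he_stddev = np.sqrt(2.0 / fan_in)
W_out = tf.get_variable("W_out", shape=[fan_in, 1],
                        initializer=tf.truncated_normal_initializer(stddev=he_stddev))
b_out = tf.get_variable("b_out", shape=[1],
                        initializer=tf.zeros_initializer())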

tensor flow character recognition with softmax results in accuracy 1 due to [NaN...NaN] prediction

I am trying to use the softmax regression method discussed in https://www.tensorflow.org/get_started/mnist/beginners to recognize characters.
My code is as follows.
train_data = pd.read_csv('CharDataSet/train.csv')
print(train_data.shape)
x = tf.placeholder(tf.float32, [None, 130])
W = tf.Variable(tf.zeros([130, 26]))
b = tf.Variable(tf.zeros([26]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 26])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(10):
    batch_xs = train_data.iloc[:, 2:]
    print(batch_xs)
    batch_ys = getencodedbatch(train_data.iloc[:, 1])
    print(batch_ys)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
However, I am getting an accuracy of 1, which shouldn't be the case.
The reason I am getting this is that my y tensor comes out as an array like
[nan, ..., nan]
Can anyone explain to me what is wrong in my code?
I converted each character to a one-hot encoding using the method below
def getencodedbatch(param):
    s = (param.shape[0], 26)
    y_encode = np.zeros(s)
    row = 0
    # print(y_encode)
    for val in param:
        col = ord(val) - 97
        y_encode[row, col] = 1
        row += 1
    return pd.DataFrame(y_encode)
Here is the problem you are having:
You set your initial weights and biases to 0 (this is wrong, as your network does not learn).
The result is that y consists of all zeros.
You take the log of y, and the log of 0 is not defined. Hence the NaN.
Good luck!
Edit to tell you how to fix it: look for an example on classifying MNIST characters and see what they do. You probably want to initialise your weights to be random normals ;)
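A minimal sketch of that change for the 130-feature, 26-class model above (the stddev of 0.1 is just a common starting point, not a tuned value):

# Small random normals instead of zeros for the weights, and a small
# positive constant for the biases.
W = tf.Variable(tf.truncated_normal([130, 26], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[26]))

# Also worth considering: computing the loss from the logits with
# tf.nn.softmax_cross_entropy_with_logits avoids taking log(0) by hand.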

Error in Dimension for LSTM in tflearn

I am training on the PTB dataset to predict characters (i.e. a character-level LSTM).
The dimension of the training batches is [len(dataset) x vocabulary_size]. Here, vocabulary_size = 27 (26 + 1 for unk tokens and spaces or full stops).
This is the code for converting both the batch inputs (arrX) and the labels (arrY) to one-hot:
arrX = np.zeros((len(train_data), vocabulary_size), dtype=np.float32)
arrY = np.zeros((len(train_data)-1, vocabulary_size), dtype=np.float32)
for i, x in enumerate(train_data):
    arrX[i, x] = 1
arrY = arrX[1, :]
I am making placeholders for the input (X) and labels (Y) in the graph to pass them to the tflearn LSTM. Following is the code for the graph and session.
batch_size = 256

with tf.Graph().as_default():
    X = tf.placeholder(shape=(None, vocabulary_size), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, vocabulary_size), dtype=tf.float32)
    print(utils.get_incoming_shape(tf.concat(0, Y)))
    print(utils.get_incoming_shape(X))

    net = tflearn.lstm(X, 512, return_seq=True)
    print(utils.get_incoming_shape(net))
    net = tflearn.dropout(net, 0.5)
    print(utils.get_incoming_shape(net))
    net = tflearn.lstm(net, 256)
    net = tflearn.fully_connected(net, vocabulary_size, activation='softmax')

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(net, Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

    init = tf.initialize_all_variables()

    with tf.Session() as sess:
        sess.run(init)
        offset = 0
        avg_cost = 0
        total_batch = (train_length-1) / 256
        print("No. of batches:", '%d' % total_batch)

        for i in range(total_batch):
            batch_xs, batch_ys = trainX[offset : batch_size + offset], trainY[offset : batch_size + offset]
            sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
            cost = sess.run(loss, feed_dict={X: batch_xs, Y: batch_ys})
            avg_cost += cost / total_batch
            if i % 20 == 0:
                print("Step:", '%03d' % i, "Loss:", str(cost))
            offset += batch_size
So, I get the following error:
assert ndim >= 3, "Input dim should be at least 3."
AssertionError: Input dim should be at least 3.
How can I resolve this error? Is there any alternate solution?
Should I write separate LSTM definition?
I'm not used to this kind of dataset, but have you tried using tflearn.input_data(shape) with the tflearn.embedding layer? If you use the embedding, I suppose you won't have to reshape your data to 3 dimensions.
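A rough sketch of that suggestion (the embedding size and seq_len are illustrative assumptions, and it presumes the inputs are integer character ids of length seq_len rather than one-hot rows):

import tflearn

net = tflearn.input_data(shape=[None, seq_len])          # integer character ids
net = tflearn.embedding(net, input_dim=vocabulary_size, output_dim=64)
net = tflearn.lstm(net, 512, return_seq=True)            # embedding output is already 3-D
net = tflearn.dropout(net, 0.5)
net = tflearn.lstm(net, 256)
net = tflearn.fully_connected(net, vocabulary_size, activation='softmax')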
The lstm layer takes input in the shape of a 3-D tensor [samples, timesteps, input dim], so you can reshape your input data to 3-D. In your problem the shape of trainX is [len(dataset) x vocabulary_size]. Using trainX = trainX.reshape(trainX.shape + (1,)), the shape will be changed to [len(dataset), vocabulary_size, 1]. This data can be passed to the lstm by a simple change in the input placeholder X: add one more dimension to the placeholder with X = tf.placeholder(shape=(None, vocabulary_size, 1), dtype=tf.float32).
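Putting that together, a minimal sketch of the reshape and the matching placeholder, using the names from the question:

# Add a trailing dimension so the data becomes [samples, timesteps, input_dim].
trainX = trainX.reshape(trainX.shape + (1,))   # now [len(dataset), vocabulary_size, 1]
X = tf.placeholder(shape=(None, vocabulary_size, 1), dtype=tf.float32)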