I am trying to train a very simple linear regression with TensorFlow, but the loss doesn't decrease and the TensorBoard output also doesn't look right.
### Generate data
w_true = np.array([1.0,2.0])
b_true = 0.5
x_train = np.random.multivariate_normal(mean=[0,0], cov=[[1,0],[0,1]], size=100)
x_test = np.random.multivariate_normal(mean=[0,0], cov=[[3,0],[0,3]], size=100)
y_train = np.dot(x_train,w_true) + b_true
y_test = np.dot(x_test,w_true) + b_true
### Placeholders for data input
x = tf.placeholder(dtype=tf.float32, shape=[None,2], name="x")
y = tf.placeholder(dtype=tf.float32, shape=[None], name="labels")
### Trainable parameters
w = tf.Variable(initial_value=np.random.multivariate_normal([0,0],[[1,0],[0,1]]), dtype=tf.float32,
name="W")
b = tf.Variable(initial_value=np.random.normal(1), dtype=tf.float32,name="B")
### Computational graph
y_pred = tf.tensordot(x,w,1)+b
tf.summary.histogram("weights",w)
tf.summary.histogram("bias",b)
loss = tf.reduce_sum(tf.squared_difference(y,y_pred), name="loss")
tf.summary.scalar("loss", loss)
with tf.name_scope("train"):
    train_step = tf.train.GradientDescentOptimizer(0.00001).minimize(loss)
### Training
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# For TensorBoard
writer = tf.summary.FileWriter("path_to_some_folder")
writer.add_graph(sess.graph)
for t in range(1000):
    x_batch = x_train[np.random.choice(100, 20)]
    y_batch = y_train[np.random.choice(100, 20)]
    sess.run(train_step, {x: x_batch, y: y_batch})
print(sess.run(loss, {x:x_train,y:y_train}))
print(sess.run(loss, {x:x_test,y:y_test}))
I have tried different step sizes, but the error always stays above 400 on the training set and 1000 on the test set. I have tested that tf.tensordot() behaves as I expect. If you would like to see the TensorBoard output, just replace path_to_some_folder and, after training, run tensorboard --logdir path_to_some_folder
Thanks very much for the help.
Your problem is caused by the following two lines:
x_batch = x_train[np.random.choice(100, 20)]
y_batch = y_train[np.random.choice(100, 20)]
In each iteration, the two calls to np.random.choice(100, 20) return different index lists for x_batch and y_batch, so the features and labels in a batch never correspond to each other. Instead, replace that part with the following code.
BATCH_SIZE = 10
N_COUNT = len(x_train)

for t in range(1000):
    for start, end in zip(range(0, N_COUNT, BATCH_SIZE),
                          range(BATCH_SIZE, N_COUNT + 1, BATCH_SIZE)):
        x_batch = x_train[start:end]
        y_batch = y_train[start:end]
        sess.run(train_step, {x: x_batch, y: y_batch})
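If you prefer random mini-batches, another option (a small sketch of the same fix, not the only way) is to draw one set of indices per step and use it for both arrays:

for t in range(1000):
    idx = np.random.choice(100, 20, replace=False)  # one index set shared by features and labels
    x_batch = x_train[idx]
    y_batch = y_train[idx]
    sess.run(train_step, {x: x_batch, y: y_batch})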
Hope this helps.
I am new to TensorFlow and I hope you can help me.
I have built a TensorFlow CNN and trained it successfully. The training datasets are MATLAB arrays. Now I would like to use the trained network to run inference, but I am not sure how to write the Python code for it.
During training, I saved the model. I am not sure how to load the model for inference.
My inference data is also a MATLAB array, in the same format as the training data. How can I use it? During training, I used minibatches from TensorLayer; should I use minibatches in inference too?
Below is my inference code; it gives a lot of errors:
print("\n\nPreparing testing data........................")
test_data = sio.loadmat('MyTest.mat')
Z0 = test_data['Real_testing1']
img_num_test = Z0.shape[0]
X_test = np.empty([img_num_test, 128, 128, 1], dtype=float)
X_test[:,:,:,0] = Z0
Y_test = np.column_stack((np.ones([img_num_test, 1], dtype=int),np.zeros([img_num_test, 1], dtype=int)))
print("\tTesting X shape: {0}".format(X_test.shape))
print("\tTesting Y shape: {0}".format(Y_test.shape))
print("\n\nRestore the network ...")
save_dir = "checkpoints/";
epoch = 1000
model_name = save_dir + str(epoch) + '_model'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
saver = tf.train.Saver().restore(sess, save_path=model_name)
start_time_begin = time.time()
print("\n\nRunning network...")
start_time = time.time()
y = model.Scribenet(X_test[0, :, :, :], False, 1.0)
y = sess.run([y], feed_dict=feed_dict)
print(y[0:9])
sess.close()
Below is my training code:
x = tf.placeholder(tf.float32, shape=[None, 128, 128, 1], name='x')
y_ = tf.placeholder(tf.int64, shape=[None, 2], name='y_')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
is_training = tf.placeholder(tf.bool, name='is_traininng')
net_in = x
net_out = model.MyCNN(net_in, is_training, keep_prob)
y = net_out
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_, name='cost'))
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
y_op = tf.argmax(tf.nn.softmax(y),1)
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-08, use_locking=False).minimize(cost)
sess.run(tf.global_variables_initializer())
save_dir = "checkpoints/";
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
saver = tf.train.Saver()
print("\n\nStart training the network ...")
start_time_begin = time.time()
for epoch in range(n_epoch):
    start_time = time.time()
    loss_ep = 0; n_step = 0
    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, Y_train,
                                                       batch_size, shuffle=True):
        feed_dict = {x: X_train_a, y_: y_train_a, is_training: True, keep_prob: train_keep_prob}
        loss, _ = sess.run([cost, train_op], feed_dict=feed_dict)
        loss_ep += loss
        n_step += 1
    loss_ep = loss_ep / n_step
    if (epoch+1) % save_freq == 0:
        model_name = save_dir + str(epoch+1) + '_model'
        saver.save(sess, save_path=model_name)
The main issue is that there is no graph building in your inference code. You either need to save the whole graph (in the SavedModel format), or build the graph in your inference script and load your variables from a training checkpoint (probably the easiest way to start). As long as the variable names are the same, you can load variables saved from the training graph into the inference graph.
So inference will be your training code, but without the y_ placeholder and without the loss/optimizer logic. You can feed a single image (batch size 1) to start, so there is no need for batching logic either.
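As a rough sketch of the checkpoint route (assuming the same model.MyCNN builder and the placeholder names from your training code; adjust paths and shapes to your setup), inference could look something like this:

# Rebuild the same graph as in training (variable names must match the checkpoint).
x = tf.placeholder(tf.float32, shape=[None, 128, 128, 1], name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
is_training = tf.placeholder(tf.bool, name='is_traininng')

net_out = model.MyCNN(x, is_training, keep_prob)   # same network builder as in training
y_op = tf.argmax(tf.nn.softmax(net_out), 1)        # predicted class per image

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, save_path=model_name)      # e.g. "checkpoints/1000_model"
    # Feed one image as a batch of size 1; dropout off, not training.
    pred = sess.run(y_op, feed_dict={x: X_test[0:1, :, :, :],
                                     is_training: False,
                                     keep_prob: 1.0})
    print(pred)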
Dataset Description
The dataset contains a set of question pairs and a label which tells whether the two questions are the same, e.g.
"How do I read and find my YouTube comments?", "How can I see all my Youtube comments?", "1"
The goal of the model is to identify whether a given question pair is the same or different.
Approach
I have created a Siamese network to identify whether two questions are the same. The model is as follows:
graph = tf.Graph()
with graph.as_default():
    embedding_placeholder = tf.placeholder(tf.float32, shape=embedding_matrix.shape, name='embedding_placeholder')

    with tf.variable_scope('siamese_network') as scope:
        labels = tf.placeholder(tf.int32, [batch_size, None], name='labels')
        keep_prob = tf.placeholder(tf.float32, name='question1_keep_prob')

        with tf.name_scope('question1') as question1_scope:
            question1_inputs = tf.placeholder(tf.int32, [batch_size, seq_len], name='question1_inputs')
            question1_embedding = tf.get_variable(name='embedding', initializer=embedding_placeholder, trainable=False)
            question1_embed = tf.nn.embedding_lookup(question1_embedding, question1_inputs)
            question1_lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
            question1_drop = tf.contrib.rnn.DropoutWrapper(question1_lstm, output_keep_prob=keep_prob)
            question1_multi_lstm = tf.contrib.rnn.MultiRNNCell([question1_drop] * lstm_layers)
            q1_initial_state = question1_multi_lstm.zero_state(batch_size, tf.float32)
            question1_outputs, question1_final_state = tf.nn.dynamic_rnn(question1_multi_lstm, question1_embed, initial_state=q1_initial_state)

        scope.reuse_variables()

        with tf.name_scope('question2') as question2_scope:
            question2_inputs = tf.placeholder(tf.int32, [batch_size, seq_len], name='question2_inputs')
            question2_embedding = question1_embedding
            question2_embed = tf.nn.embedding_lookup(question2_embedding, question2_inputs)
            question2_lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
            question2_drop = tf.contrib.rnn.DropoutWrapper(question2_lstm, output_keep_prob=keep_prob)
            question2_multi_lstm = tf.contrib.rnn.MultiRNNCell([question2_drop] * lstm_layers)
            q2_initial_state = question2_multi_lstm.zero_state(batch_size, tf.float32)
            question2_outputs, question2_final_state = tf.nn.dynamic_rnn(question2_multi_lstm, question2_embed, initial_state=q2_initial_state)
Calculate the distance between the two RNN outputs and build the contrastive loss:
with graph.as_default():
    diff = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(question1_outputs[:, -1, :], question2_outputs[:, -1, :])), reduction_indices=1))

    margin = tf.constant(1.)
    labels = tf.to_float(labels)
    match_loss = tf.expand_dims(tf.square(diff, 'match_term'), 0)
    mismatch_loss = tf.expand_dims(tf.maximum(0., tf.subtract(margin, tf.square(diff)), 'mismatch_term'), 0)

    loss = tf.add(tf.matmul(labels, match_loss), tf.matmul((1 - labels), mismatch_loss), 'loss_add')
    distance = tf.reduce_mean(loss)

    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(distance)
Following is the code to train the model:
with graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer(), feed_dict={embedding_placeholder: embedding_matrix})
    iteration = 1
    for e in range(epochs):
        summary_writer = tf.summary.FileWriter('/Users/mithun/projects/kaggle/quora_question_pairs/logs', sess.graph)
        summary_writer.add_graph(sess.graph)

        for ii, (x1, x2, y) in enumerate(get_batches(question1_train, question2_train, label_train, batch_size), 1):
            feed = {question1_inputs: x1,
                    question2_inputs: x2,
                    labels: y[:, None],
                    keep_prob: 0.9
                    }
            loss1 = sess.run([distance], feed_dict=feed)

            if iteration % 5 == 0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss1))

            if iteration % 50 == 0:
                val_acc = []
                for x1, x2, y in get_batches(question1_val, question2_val, label_val, batch_size):
                    feed = {question1_inputs: x1,
                            question2_inputs: x2,
                            labels: y[:, None],
                            keep_prob: 1
                            }
                    batch_acc = sess.run([accuracy], feed_dict=feed)
                    val_acc.append(batch_acc)
                print("Val acc: {:.3f}".format(np.mean(val_acc)))

            iteration += 1

    saver.save(sess, "checkpoints/quora_pairs.ckpt")
I have trained the above model with about 10,000 labeled examples, but the accuracy is stagnant at around 0.630 and, strangely, the validation accuracy is the same across all iterations.
lstm_size = 64
lstm_layers = 1
batch_size = 128
learning_rate = 0.001
Is there anything wrong with the way I have created the model?
This is a common problem with imbalanced datasets like the recently released Quora dataset which you are using. Since the Quora dataset is imbalanced (~63% negative and ~37% positive examples), you need proper weight initialization. Without it, your solution gets stuck in a local minimum and trains to predict only the negative class. Hence the 63% accuracy, because that is the percentage of 'not similar' questions in your validation data. If you check the predictions on your validation set, you will notice that the model predicts all zeros. A truncated normal distribution, as proposed in He et al. (http://arxiv.org/abs/1502.01852), is a good alternative for initializing the weights.
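For illustration (a minimal sketch, not code from the question or the paper; the output projection layer below is hypothetical), a truncated-normal, He-style initialization for a weight matrix of shape [n_in, n_out] could look like this:

import numpy as np
import tensorflow as tf

def he_truncated_normal(shape):
    # Truncated normal with stddev ~ sqrt(2 / fan_in), following He et al. (2015).
    fan_in = shape[0]
    stddev = np.sqrt(2.0 / fan_in)
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))

# Hypothetical final projection layer on top of the LSTM output (lstm_size from the question).
W_out = he_truncated_normal([lstm_size, 2])
b_out = tf.Variable(tf.zeros([2]))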
I am trying to build a linear classifier on CIFAR-100 using TensorFlow. I took the code from Martin Gorner's MNIST tutorial and changed it a bit. When I run this code, TensorFlow does not train: the code runs, but the accuracy stays at 1.0 and the loss (cross-entropy) stays at 4605.17. I don't know what is wrong; I am actually a newbie to TF, and any help is appreciated.
import pickle
import numpy as np
import os
import tensorflow as tf
from tensorflow.python.framework import tensor_util
import math
#imports data
def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict
cifar100_test = {}
cifar100_train = {}
labelMap = {}
labelNames = {}
# Load the raw CIFAR-100 data.
cifar100_test = unpickle('dataset/cifar-100-python/test')
cifar100_train = unpickle('dataset/cifar-100-python/train')
labelMap = unpickle('dataset/cifar-100-python/meta')
#tr for training data and te for testing data, X is data, Y is label
Xtr = cifar100_train[b'data']
Yr = cifar100_train[b'fine_labels']
Xte = cifar100_test[b'data']
Ye = cifar100_test[b'fine_labels']
classNames = labelMap[b'fine_label_names']
num_train = Xtr.shape[0]
num_test = Xte.shape[0]
num_class = len(classNames)
Ytr = np.zeros([num_train, num_class])
Yte = np.zeros([num_test, num_class])
Ytr[0:num_train, Yr[0:num_train]] = 1
Yte[0:num_test, Ye[0:num_test]] = 1
# As a sanity check, we print out the size of the training and test data.
print('Train data shape:', Xtr.shape)
print('Train Label shape:', Ytr.shape)
print('Test data shape:', Xte.shape)
print('Test Label shape:', Yte.shape)
print('Name of Predicted Class:', classNames[0]) #index of the label name is the index of the class.
Xtrain = Xtr#[:1000]
Xtest = Xte#[:100]
Ytrain = Ytr#[:1000]
Ytest = Yte#[:100]
print('Train data shape:', Xtrain.shape)
print('Train Label shape:', Ytrain.shape)
print('Test data shape:', Xtest.shape)
print('Test Label shape:', Ytest.shape)
Xtrain = np.reshape(Xtrain,(50000, 32, 32, 3)).transpose(0,1,2,3).astype(float)
Xtest = np.reshape(Xtest,(10000, 32, 32, 3)).transpose(0,1,2,3).astype(float)
Xbatches = np.split(Xtrain, 500); #second number is # of batches
Ybatches = np.split(np.asarray(Ytrain), 500);
XtestB = np.split(Xtest, 100);
YtestB = np.split(Ytest, 100);
print('X # of batches:', len(Xbatches))
print('Y # of batches:', len(Ybatches))
# input X: 32x32 RGB images; the first dimension indexes the images in the mini-batch (fixed at 100 here)
X = tf.placeholder(tf.float32, [100, 32, 32, 3])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [100, 100])
# weights W[3072, 100]   3072 = 32*32*3
W = tf.Variable(tf.zeros([3072, 100]))
# biases b[100]
b = tf.Variable(tf.zeros([100]))
# flatten the images into a single line of pixels
# -1 in the shape definition means "the only possible dimension that will preserve the number of elements"
XX = tf.reshape(X, [-1, 3072])
# The model
Y = tf.nn.softmax(tf.matmul(XX, W) + b)
# loss function: cross-entropy = - sum( Y_i * log(Yi) )
# Y: the computed output vector
# Y_: the desired output vector
# cross-entropy
# log takes the log of each element, * multiplies the tensors element by element
# reduce_mean will add all the components in the tensor
# so here we end up with the total cross-entropy for all images in the batch
cross_entropy = -tf.reduce_mean(Y_ * tf.log(Y)) * 1000.0 # normalized for batches of 100 images,
# *10 because "mean" included an unwanted division by 10
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# training, learning rate = 0.01
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(500):
    # the backpropagation training step
    t, Loss = sess.run([train_step, cross_entropy], feed_dict={X: Xbatches[i], Y_: Ybatches[i]})
    print(Loss)
    print(i)

for i in range(100):
    print('accuracy:', sess.run(accuracy, feed_dict={X: XtestB[i], Y_: YtestB[i]}))
You compute the accuracy a hundred times after the training process is completed. Nothing will change there. You should place your print('accuracy:'....) within the for loop in which you perform the backpropagation:
for i in range(500):
    # the backpropagation training step
    t, Loss = sess.run([train_step, cross_entropy], feed_dict={X: Xbatches[i], Y_: Ybatches[i]})
    print(Loss)
    print(i)
    # wrap the index around, since there are only 100 test batches
    print('accuracy:', sess.run(accuracy, feed_dict={X: XtestB[i % len(XtestB)], Y_: YtestB[i % len(YtestB)]}))
Sorry for the post; it turns out that it was a basic mistake.
I changed the following:
Ytr[0:num_train, Yr[0:num_train]] = 1
Yte[0:num_test, Ye[0:num_test]] = 1
with
Ytr[range(num_train), Yr_temp[range(num_train)]] = 1
Yte[range(num_test), Ye_temp[range(num_test)]] = 1
The first version sets a 1 in every listed column of every row, but I just wanted to set the index of the true class to 1 and leave the other elements 0. Thanks for your time.
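For anyone else hitting this, a tiny NumPy example (with made-up values) shows the difference between the two indexing styles:

import numpy as np

labels = np.array([2, 0, 1])      # true class per example (illustrative)
onehot = np.zeros((3, 4))

# Slice + array index: every row gets a 1 in *all* listed columns.
onehot[0:3, labels] = 1           # ones in columns 0, 1 and 2 of every row

onehot = np.zeros((3, 4))
# Array + array index: row i is paired with column labels[i].
onehot[range(3), labels] = 1      # exactly one 1 per row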
When I run the following code, it prints a constant loss at every training step; I also tried printing the parameters, which also do not change.
I can't seem to figure out why train_step, which uses a GradientDescentOptimizer, doesn't change the weights in W_fc1, b_fc1, W_fc2, and b_fc2.
I'm a beginner to machine learning, so I might be missing something obvious.
(An answer to a similar question was that weights should not be initialized at zero, but the weights here are initialized with a truncated normal, so that can't be the problem.)
import tensorflow as tf
import numpy as np
import csv
import random
with open('wine_data.csv', 'rb') as csvfile:
    input_arr = list(csv.reader(csvfile, delimiter=','))

for i in range(len(input_arr)):
    input_arr[i][0] = int(input_arr[i][0]) - 1  # 0 index for one hot
    for j in range(1, len(input_arr[i])):
        input_arr[i][j] = float(input_arr[i][j])
training_data = np.array(input_arr[:2*len(input_arr)/3]) # train on first two thirds of data
testing_data = np.array(input_arr[2*len(input_arr)/3:]) # test on last third of data
x_train = training_data[0:, 1:]
y_train = training_data[0:, 0]
x_test = testing_data[0:, 1:]
y_test = testing_data[0:, 0]
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
x = tf.placeholder(tf.float32, shape=[None, 13], name='x')
y_ = tf.placeholder(tf.float32, shape=[None], name='y_')
y_one_hot = tf.one_hot(tf.cast(y_, tf.int32), 3) # actual y values
W_fc1 = weight_variable([13, 128])
b_fc1 = bias_variable([128])
fc1 = tf.matmul(x, W_fc1)+b_fc1
W_fc2 = weight_variable([128, 3])
b_fc2 = bias_variable([3])
y = tf.nn.softmax(tf.matmul(fc1, W_fc2)+b_fc2)
cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=y_one_hot, logits=y))
train_step = tf.train.GradientDescentOptimizer(1e-17).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_one_hot,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(1000):
    train_step.run(feed_dict={x: x_train, y_: y_train})
    if _ % 10 == 0:
        loss = cross_entropy.eval(feed_dict={x: x_train, y_: y_train})
        print('step', _, 'loss', loss)
Thanks in advance.
From the official TensorFlow documentation:
WARNING: This op expects unscaled logits, since it performs a softmax on logits internally for efficiency. Do not call this op with the output of softmax, as it will produce incorrect results.
Remove the softmax on y before feeding it into tf.nn.softmax_cross_entropy_with_logits.
Also, set your learning rate to something higher (like 3e-4).
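In other words, something like the following (a minimal sketch of the change, keeping the rest of your graph as-is):

logits = tf.matmul(fc1, W_fc2) + b_fc2          # no softmax here
y = tf.nn.softmax(logits)                       # only used for predictions/accuracy
cross_entropy = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_one_hot, logits=logits))
train_step = tf.train.GradientDescentOptimizer(3e-4).minimize(cross_entropy)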
I have a simple model for MNIST data classification with an accuracy of around 92%.
I would like to know if there is any way I can provide an image of a digit and get the predicted label for that digit as output. The image can be from the MNIST test data rather than a custom image, just to avoid image preprocessing. Below is the code for my model.
Thanks
import tensorflow as tf
#reset graph
tf.reset_default_graph()
#constants
learning_rate = 0.5
batch_size = 100
training_epochs = 5
logs_path = "/tmp/mnist/2"
#load mnist data set
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
with tf.name_scope('inputs'):
    x = tf.placeholder(tf.float32, shape=[None, 784], name="image-input")
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name="labels-input")

#weights
with tf.name_scope("weights"):
    W = tf.Variable(tf.zeros([784, 10]))

#biases
with tf.name_scope("biases"):
    b = tf.Variable(tf.zeros([10]))

#Activation function softmax
with tf.name_scope("softmax"):
    #y is prediction
    y = tf.nn.softmax(tf.matmul(x, W) + b)

#Cost function
with tf.name_scope('cross_entropy'):
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) #????

#Define Optimizer
with tf.name_scope('train'):
    train_optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

#Accuracy
with tf.name_scope('Accuracy'):
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)

#Merge all summaries into a single "operation" which will be executed in a session
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    #initialize variables before using them
    sess.run(tf.global_variables_initializer())
    #log writer object
    # writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
    writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    #training cycles
    for epoch in range(training_epochs):
        #number of batches in one epoch
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_optimizer, summary_op], feed_dict={x: batch_x, y_: batch_y})
            writer.add_summary(summary, epoch * batch_count + i)
        if epoch % 5 == 0:
            print("Epoch: ", epoch)
    print("Accuracy: ", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    print("Done")
print("Done")
After you trained the network, you can get the label that the network gives to a new image by doing
new_image_label= sess.run(y, feed_dict={x: new_image})
Note that the format of new_image should be the same as that of batch_x. Think of new_image as a batch of size 1, so if batch_x is 2D, new_image should also be 2D (of shape 1 by 784).
In addition, if you did some pre-processing (like normalization for example) to the images in batch_x, you need to do the same thing with new_image.
You could also get the labels of several images simultaneously with the same code as above. Just replace new_image with some 2D array of several images new_images.
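For example (assuming the session and graph from your training script are still open, and using an MNIST test image so no extra preprocessing is needed):

import numpy as np

new_image = mnist.test.images[0:1]                      # shape (1, 784), already normalized
probs = sess.run(y, feed_dict={x: new_image})           # softmax probabilities, shape (1, 10)
predicted_label = np.argmax(probs, axis=1)[0]           # index of the most likely digit
print("Predicted digit:", predicted_label)
print("True digit:", np.argmax(mnist.test.labels[0]))   # one-hot ground truth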