I'm new to TensorFlow and I'm following the example here, but I have one question.
The code is as follows:
import numpy as np
import tensorflow as tf
from time import time
import math
from include.data import get_data_set
from include.model import model, lr
train_x, train_y = get_data_set("train")
test_x, test_y = get_data_set("test")
x, y, output, y_pred_cls, global_step, learning_rate = model()
global_accuracy = 0
# PARAMS
_BATCH_SIZE = 128
_EPOCH = 60
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"
# LOSS AND OPTIMIZER
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=1e-08).minimize(loss, global_step=global_step)
# PREDICTION AND ACCURACY CALCULATION
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# SAVER
merged = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)
try:
print("\nTrying to restore last checkpoint ...")
last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
saver.restore(sess, save_path=last_chk_path)
print("Restored checkpoint from:", last_chk_path)
except ValueError:
print("\nFailed to restore checkpoint. Initializing variables instead.")
sess.run(tf.global_variables_initializer())
def train(epoch):
batch_size = int(math.ceil(len(train_x) / _BATCH_SIZE))
i_global = 0
for s in range(batch_size):
batch_xs = train_x[s*_BATCH_SIZE: (s+1)*_BATCH_SIZE]
batch_ys = train_y[s*_BATCH_SIZE: (s+1)*_BATCH_SIZE]
start_time = time()
i_global, _, batch_loss, batch_acc = sess.run(
[global_step, optimizer, loss, accuracy],
feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)})
duration = time() - start_time
if s % 10 == 0:
percentage = int(round((s/batch_size)*100))
bar_len = 29
filled_len = int((bar_len*int(percentage))/100)
bar = '=' * filled_len + '>' + '-' * (bar_len - filled_len)
msg = "Global step: {:>5} - [{}] {:>3}% - acc: {:.4f} - loss: {:.4f} - {:.1f} sample/sec"
print(msg.format(i_global, bar, percentage, batch_acc, batch_loss, _BATCH_SIZE / duration))
test_and_save(i_global, epoch)
def test_and_save(_global_step, epoch):
global global_accuracy
i = 0
predicted_class = np.zeros(shape=len(test_x), dtype=np.int)
while i < len(test_x):
j = min(i + _BATCH_SIZE, len(test_x))
batch_xs = test_x[i:j, :]
batch_ys = test_y[i:j, :]
predicted_class[i:j] = sess.run(
y_pred_cls,
feed_dict={x: batch_xs, y: batch_ys, learning_rate: lr(epoch)}
)
i = j
correct = (np.argmax(test_y, axis=1) == predicted_class)
acc = correct.mean()*100
correct_numbers = correct.sum()
mes = "\nEpoch {} - accuracy: {:.2f}% ({}/{})"
print(mes.format((epoch+1), acc, correct_numbers, len(test_x)))
if global_accuracy != 0 and global_accuracy < acc:
summary = tf.Summary(value=[
tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
])
train_writer.add_summary(summary, _global_step)
saver.save(sess, save_path=_SAVE_PATH, global_step=_global_step)
mes = "This epoch receive better accuracy: {:.2f} > {:.2f}. Saving session..."
print(mes.format(acc, global_accuracy))
global_accuracy = acc
elif global_accuracy == 0:
global_accuracy = acc
print("###########################################################################################################")
def main():
for i in range(_EPOCH):
print("\nEpoch: {0}/{1}\n".format((i+1), _EPOCH))
train(i)
if __name__ == "__main__":
main()
sess.close()
In this example, I think, both the test and the training data feed the network, but normally only the training data should feed the network. I cannot see any difference between the train() and test_and_save() functions. Am I wrong? Thanks
Here is an explanation, if I understood your question correctly. The train function is called every epoch and iterates through the training data. At the end of the epoch the test_and_save function is called, where the model accuracy is evaluated. It iterates through the test data with the learned weights, calculates the accuracy, and saves the model. This is repeated _EPOCH times.
Edit: The model is saved in the test_and_save function. However, the weights are only updated (gradients calculated) when the optimizer is passed to sess.run() in the train function. In the test_and_save function the test data is fed to the network, but only y_pred_cls is evaluated via sess.run(), so the weights are left unchanged.
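To make that concrete, here is a minimal standalone sketch (a toy graph of my own, not the code from the question): fetching the optimizer op in sess.run() applies gradients and changes the weights, while fetching only a prediction tensor leaves them untouched.
import tensorflow as tf
import numpy as np
# Toy graph: a single linear layer, just to show which fetches update weights.
x_ph = tf.placeholder(tf.float32, [None, 4])
y_ph = tf.placeholder(tf.float32, [None, 2])
w = tf.Variable(tf.zeros([4, 2]))
logits = tf.matmul(x_ph, w)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_ph))
train_op = tf.train.AdamOptimizer(0.01).minimize(loss_op)
pred_op = tf.argmax(logits, axis=1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    xs = np.random.rand(8, 4).astype(np.float32)
    ys = np.eye(2)[np.random.randint(0, 2, 8)].astype(np.float32)
    # "Training" step: train_op is in the fetch list, so gradients are applied and w changes.
    sess.run([train_op, loss_op], feed_dict={x_ph: xs, y_ph: ys})
    # "Evaluation" step: only pred_op is fetched, so w stays exactly the same.
    sess.run(pred_op, feed_dict={x_ph: xs})
That is the only structural difference between train() and test_and_save() above: only train() puts the optimizer in the fetch list.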
I am trying to use the Inception model (GoogLeNet) from this link https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v1.py, which is implemented by Google using the TensorFlow Slim API, to classify images from the CIFAR-10 dataset (dataset link https://www.cs.toronto.edu/~kriz/cifar.html). The problem is that the network cost stays almost constant and I can't find the bug. I am very new to TensorFlow and Slim, so I would really appreciate any help.
I am using these packages:
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import ops
import matplotlib.pyplot as plt
import os
import pickle
import cv2
from sklearn import model_selection as ms
from nets import inception_v1,inception_utils
import math
%matplotlib inline
And I made these two functions:
def one_hot_matrix(labels, C):
C = tf.constant(C,name='C')
one_hot_matrix = tf.one_hot(labels,C,axis=0)
sess = tf.Session()
one_hot = sess.run(one_hot_matrix)
sess.close()
return one_hot
def make_mini_batches(X, Y, mini_batch_size):
m = X.shape[0]
mini_batches = []
# number of mini batches of size mini_batch_size in the dataset
num_complete_minibatches = math.floor(m/mini_batch_size)
for k in range(0, num_complete_minibatches):
mini_batch_X = X[k*mini_batch_size : (k+1)*mini_batch_size,...]
mini_batch_Y = Y[k*mini_batch_size : (k+1)*mini_batch_size,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = X[num_complete_minibatches*mini_batch_size:,...]
mini_batch_Y = Y[num_complete_minibatches*mini_batch_size:,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
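As a quick sanity check of the two helpers above (my own toy example, not part of the original post): one_hot_matrix puts the one-hot vectors in columns because of axis=0, which is why it gets transposed later, and make_mini_batches produces three full batches plus a trailing batch of one for 10 samples with mini_batch_size=3.
import numpy as np
print(one_hot_matrix([0, 2, 1], 3))        # shape (3, 3); columns are the one-hot vectors
X_toy = np.arange(10).reshape(10, 1)
Y_toy = np.eye(10)
batches = make_mini_batches(X_toy, Y_toy, mini_batch_size=3)
print(len(batches))                        # 4
print([b[0].shape[0] for b in batches])    # [3, 3, 3, 1]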
First, I am reading the dataset:
# function to read the batches
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
# note the encoding type is 'latin1'
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
datadict = {'data':features,'labels':labels}
return datadict
# combine batches into one dataset (batch size: 10000)
full_data = load_cfar10_batch('./cifar_10',1)['data']
full_labels = []
for i in range(5):
full_labels.extend(load_cfar10_batch('./cifar_10',i+1)['labels'])
if i > 0:
full_data = np.concatenate((full_data,load_cfar10_batch('./cifar_10',i+1)['data']),axis = 0)
# dataset sizes
full_data.shape, len(full_labels)
Followed by some preprocessing and train/validation split:
# data preprocessing (using only 1/10 of the dataset for speed)
X = full_data[0:5000]
y = one_hot_matrix(full_labels[0:5000], 10).T
# split into training-validation sets
x_train, x_val, y_train, y_val = ms.train_test_split(X, y, test_size=0.2, random_state=1)
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_train = x_train / 255.0
x_val = x_val / 255.0
print('x_train shape:',x_train.shape)
print('y_train shape:',y_train.shape)
print('x_val shape:',x_val.shape)
print('y_val shape:',y_val.shape)
Then I initialize the variables:
tf.set_random_seed(1)
seed = 3
(m, n_H, n_W, n_C) = x_train.shape
n_y = y_train.shape[1]
costs = []
print_cost = True
learning_rate = 0.001
num_epochs = 100
minibatch_size = 256
num_minibatches = int(m / minibatch_size)
minibatches = make_mini_batches(x_train, y_train, minibatch_size)
ops.reset_default_graph()
inputs = tf.placeholder(tf.float32,shape=[None, n_H, n_W, n_C],name = 'inputs')
labels = tf.placeholder(tf.int8,shape=[None, n_y],name = 'labels')
# Forward propagation (Inception)
Z = inception_v1.inception_v1(inputs,num_classes = n_y,dropout_keep_prob=1,global_pool=True)[1]['Logits']
# Cost function
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = Z, labels = labels))
# ADAM optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Initialize variables
init = tf.global_variables_initializer()
And then the training loop:
with tf.Session() as sess:
sess.run(init)
for epoch in range(num_epochs):
# learning rate decay
if epoch % 8 == 0:
learning_rate *= math.pow(0.95,epoch/8)
minibatch_cost = 0.
for minibatch in minibatches:
(minibatch_X, minibatch_Y) = minibatch
_ , temp_cost = sess.run([optimizer, cost], feed_dict={inputs: minibatch_X, labels: minibatch_Y})
minibatch_cost += temp_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost),", Learning rate: %f" %(learning_rate))
if print_cost == True and epoch % 1 == 0:
costs.append(minibatch_cost)
# Plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('Cost')
plt.xlabel('Iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# Calculate the correct predictions
predict_op = tf.argmax(Z, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(labels, 1))
# Calculate accuracy on the validation set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#print(accuracy)
train_accuracy = accuracy.eval({inputs: x_train, labels: y_train})
val_accuracy = accuracy.eval({inputs: x_val, labels: y_val})
print("Train Accuracy:", train_accuracy)
print("Validation Accuracy:", val_accuracy)
The output looks like this:
Cost after epoch 0: 2.455999 , Learning rate: 0.001000
Cost after epoch 5: 2.454697 , Learning rate: 0.001000
Cost after epoch 10: 2.454670 , Learning rate: 0.000950
Cost after epoch 15: 2.454655 , Learning rate: 0.000950
Cost after epoch 20: 2.454650 , Learning rate: 0.000857
Cost after epoch 25: 2.454649 , Learning rate: 0.000735
Cost after epoch 30: 2.454659 , Learning rate: 0.000735
Cost after epoch 35: 2.454643 , Learning rate: 0.000599
Cost after epoch 40: 2.454627 , Learning rate: 0.000463
...
...
And so my network is not training.
I managed to find the solution. I had to wrap the call to Inception in its argument scope, something like this:
with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
Z = inception_v1.inception_v1(inputs,num_classes = n_y,dropout_keep_prob=1,global_pool=True)[1]['Logits']
After that, everything works just fine.
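For reference, a minimal sketch of the fixed forward-propagation block (assuming TF-Slim is available as tf.contrib.slim, and reusing the inputs and n_y names from the question):
import tensorflow as tf
from nets import inception_v1
slim = tf.contrib.slim
# Building the network inside its arg_scope gives the conv layers the intended
# initializers, weight decay and batch-norm settings defined for Inception v1.
with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
    logits, end_points = inception_v1.inception_v1(inputs, num_classes=n_y, dropout_keep_prob=1, global_pool=True)
Z = end_points['Logits']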
I have a TensorFlow (TF) model that I'd like to restore and retrain some of its parameters. I know that tf.get_operation_by_name('name of the optimizer') retrieves the original optimizer that was used to train the model before it was stored. However, I don't know how to pass the new list of TF variables that I want the optimizer to retrain!
This example helps illustrate what I want to do:
learning_rate = 0.0001
training_iters = 60000
batch_size = 64
display_step = 20
ImVecDim = 784  # The number of elements in an image vector (flattening a 28x28 2D image)
NumOfClasses = 10
dropout = 0.8
with tf.Session() as sess:
LoadMod = tf.train.import_meta_graph('simple_mnist.ckpt.meta') # This object loads the model
LoadMod.restore(sess, tf.train.latest_checkpoint('./')) # Loading weights and biases and other stuff to the model
g = tf.get_default_graph()
# Variables to be retrained:
wc2 = g.get_tensor_by_name('wc2:0')
bc2 = g.get_tensor_by_name('bc2:0')
wc3 = g.get_tensor_by_name('wc3:0')
bc3 = g.get_tensor_by_name('bc3:0')
wd1 = g.get_tensor_by_name('wd1:0')
bd1 = g.get_tensor_by_name('bd1:0')
wd2 = g.get_tensor_by_name('wd2:0')
bd2 = g.get_tensor_by_name('bd2:0')
out_w = g.get_tensor_by_name('out_w:0')
out_b = g.get_tensor_by_name('out_b:0')
VarToTrain = [wc2,wc3,wd1,wd2,out_w,bc2,bc3,bd1,bd2,out_b]
# Retrieving the optimizer:
Opt = tf.get_operation_by_name('Adam')
# Retraining:
X = g.get_tensor_by_name('ImageIn:0')
Y = g.get_tensor_by_name('LabelIn:0')
KP = g.get_tensor_by_name('KeepProb:0')
accuracy = g.get_tensor_by_name('NetAccuracy:0')
cost = g.get_tensor_by_name('loss:0')
step = 1
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
#########################################################################
# Here I want to pass (VarToTrain) to the optimizer (Opt)! #
#########################################################################
if step % display_step == 0:
acc = sess.run(accuracy, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
loss = sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
step += 1
feed_dict = {X: mnist.test.images[:256], Y: mnist.test.labels[:256], KP: 1.0}
ModelAccuracy = sess.run(accuracy, feed_dict)
print('Retraining finished'+', Test Accuracy = %f' %ModelAccuracy)
Well, I have not figured out a way to do exactly what I want, but I found a way around the problem: instead of passing a new list of variables to the original optimizer, I defined a new optimizer with those variables passed to its minimize() method. The code is given below:
learning_rate = 0.0001
training_iters = 60000
batch_size = 64
display_step = 20
ImVecDim = 784  # The number of elements in an image vector (flattening a 28x28 2D image)
NumOfClasses = 10
dropout = 0.8
with tf.Session() as sess:
LoadMod = tf.train.import_meta_graph('simple_mnist.ckpt.meta') # This object loads the model
LoadMod.restore(sess, tf.train.latest_checkpoint('./')) # Loading weights and biases and other stuff to the model
g = tf.get_default_graph()
# Retraining:
X = g.get_tensor_by_name('ImageIn:0')
Y = g.get_tensor_by_name('LabelIn:0')
KP = g.get_tensor_by_name('KeepProb:0')
accuracy = g.get_tensor_by_name('NetAccuracy:0')
cost = g.get_tensor_by_name('loss:0')
######################## Producing a list and defining a new optimizer ####################################
VarToTrain = g.get_collection_ref('trainable_variables')
del VarToTrain[0] # Removing a variable from the list
del VarToTrain[5] # Removing another variable from the list
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).\
minimize(cost,var_list= VarToTrain)
##########################################################################################
step = 1
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, KP: dropout})
if step % display_step == 0:
acc = sess.run(accuracy, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
loss = sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, KP: 1.})
print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(
loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
step += 1
feed_dict = {X: mnist.test.images[:256], Y: mnist.test.labels[:256], KP: 1.0}
ModelAccuracy = sess.run(accuracy, feed_dict)
print('Retraining finished'+', Test Accuracy = %f' %ModelAccuracy)
The code above did the job, but it has some issues. First, for some reason, I keep getting error messages every time I define an optimizer similar to the original one, tf.train.AdamOptimizer(). The only optimizer that I can define without TF throwing error messages is tf.train.GradientDescentOptimizer(). The other issue with this solution is its inconvenience: in order to produce the list of variables I want to train, I first have to produce a list of all trainable variables using VarToTrain = g.get_collection_ref('trainable_variables'), print them out, memorize the positions of the unwanted variables in the list, and then delete them one by one with del. There must be a more elegant way of doing that. What I have done works fine only for small networks.
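One possibly tidier alternative is sketched below (my own suggestion, not from the post; the name prefixes are hypothetical and a recent TF 1.x release is assumed for Optimizer.variables()): select the variables to retrain by name instead of deleting list entries, and initialize only the new optimizer's own variables, since their absence from the restored checkpoint is the usual cause of errors when a fresh AdamOptimizer is added to a restored graph.
# Hypothetical prefixes - adjust them to the variable names actually in your graph.
retrain_names = {'wc2', 'bc2', 'wc3', 'bc3', 'wd1', 'bd1', 'wd2', 'bd2', 'out_w', 'out_b'}
VarToTrain = [v for v in tf.trainable_variables()
              if v.name.split(':')[0] in retrain_names]
new_opt = tf.train.AdamOptimizer(learning_rate, name='RetrainAdam')
train_op = new_opt.minimize(cost, var_list=VarToTrain)
# The checkpoint knows nothing about RetrainAdam's slot variables (moments,
# beta power accumulators), so initialize just those instead of the whole graph.
sess.run(tf.variables_initializer(new_opt.variables()))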
I'm implementing tflearn's LSTM IMDB example in TensorFlow.
I used the same dataset, architecture, and hyper-parameters (embedding size, maximum sentence length, and so on) as the tflearn model does, but my model's performance is worse than the tflearn example's (after 10 epochs, my model got about 52% accuracy while the example got nearly 80%).
I'd appreciate it a lot if you could give me some advice on how to reach the performance of the example.
Below is my code:
import tensorflow as tf
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb
from tensorflow.contrib.rnn import BasicLSTMCell
import time
n_class = 2
n_words = 10000
EMBEDDING_SIZE = 128
HIDDEN_SIZE = 128
MAX_LENGTH = 100
lr = 1e-3
epoch = 10
TRAIN_SIZE = 22500
validation_size = 2500
batch_size = 128
KP = 0.8
# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=n_words,
valid_portion=0.1, sort_by_len=False)
trainX, trainY = train
validationX, validationY = test
testX, testY = _
# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=MAX_LENGTH, value=0.)
validationX = pad_sequences(validationX, maxlen=MAX_LENGTH, value=0.)
testX = pad_sequences(testX, maxlen=MAX_LENGTH, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, n_class)
validationY = to_categorical(validationY, n_class)
testY = to_categorical(testY, n_class)
graph = tf.Graph()
with graph.as_default():
# input
text = tf.placeholder(tf.int32, [None, MAX_LENGTH])
labels = tf.placeholder(tf.float32, [None, n_class])
keep_prob = tf.placeholder(tf.float32)
embeddings_var = tf.Variable(tf.truncated_normal([n_words, EMBEDDING_SIZE]), trainable=True)
text_embedded = tf.nn.embedding_lookup(embeddings_var, text)
print(text_embedded.shape) # [batch_size, length, embedding_size]
word_list = tf.unstack(text_embedded, axis=1)
cell = BasicLSTMCell(HIDDEN_SIZE)
dropout_cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob, output_keep_prob=keep_prob)
outputs, encoding = tf.nn.static_rnn(dropout_cell, word_list, dtype=tf.float32)
logits = tf.layers.dense(outputs[-1], n_class, activation=None)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
optimizer = tf.train.AdamOptimizer(lr).minimize(loss)
prediction = tf.argmax(logits, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.argmax(labels, 1)), tf.float32))
train_steps = epoch * TRAIN_SIZE // batch_size + 1
print("Train steps: ", train_steps)
with tf.Session(graph=graph) as sess:
tf.global_variables_initializer().run()
print("Initialized!")
s = time.time()
offset = 0
for step in range(train_steps):
offset = (offset * step) % (TRAIN_SIZE - batch_size)
batch_text = trainX[offset: offset + batch_size, :]
batch_label = trainY[offset: offset + batch_size, :]
fd = {text: batch_text, labels: batch_label, keep_prob: KP}
_, l, acc = sess.run([optimizer, loss, accuracy], feed_dict=fd)
if step % 100 == 0:
print("Step: %d loss: %f accuracy: %f" % (step, l, acc))
if step % 500 == 0:
v_l, v_acc = sess.run([loss, accuracy], feed_dict={
text: validationX,
labels: validationY,
keep_prob: 1.0
})
print("------------------------------------------------")
print("Validation: step: %d loss: %f accuracy: %f" % (step, v_l, v_acc))
print("------------------------------------------------")
print("Training finished, time consumed:", time.time() - s, " s")
print("Test accuracy: %f" % accuracy.eval(feed_dict={
text: testX,
labels: testY,
keep_prob: 1.0
}))
Sorry, I made a stupid mistake!
The loss:
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
is supposed to be:
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
Then the accuracy is comparable to the tflearn example.
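A tiny sketch of why that one call matters (my own illustration, not from the post): with one-hot two-class labels, the sigmoid version scores each logit independently while the softmax version normalizes across the classes, so the same logits yield different loss values and different gradients.
import tensorflow as tf
logits = tf.constant([[2.0, -1.0]])   # one example, two classes
labels = tf.constant([[1.0, 0.0]])    # one-hot label
sigmoid_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
softmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
with tf.Session() as sess:
    print(sess.run([sigmoid_loss, softmax_loss]))   # two different values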
I am trying to train a neural network on the Poker Hand dataset (10 classes). I have tried to adapt the MNIST example to fit it. However, in my program the accuracy is always around 50%, which is rather bothersome. How can I improve the accuracy?
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape, stddev=0.1)
return tf.Variable(weights)
def forwardprop(X, weights, biases):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
h = tf.nn.sigmoid(tf.add(tf.matmul(X, weights['w_1']),biases['b_1'])) # The \sigma function
yhat = tf.add(tf.matmul(h, weights['w_2']),biases['b_2']) # The \varphi function
return yhat
def get_data(filename, targetname="target", idname="", test_size=0.10, random_state=200):
#read data from csv
df = pd.read_csv(filename)
data = pd.DataFrame(df.ix[:, df.columns != targetname])
if(idname != str("")):
df = df.drop(idname, 1)
data = pd.DataFrame(df.ix[:, df.columns != targetname])
data = pd.get_dummies(data)
all_X = data.as_matrix()
target = df[targetname]
target = pd.factorize(target)[0]
# Convert target into one-hot vectors
num_labels = len(np.unique(target))
all_Y = np.eye(num_labels)[target] # One liner trick!
return train_test_split(all_X, all_Y, test_size=test_size, random_state=random_state)
def main():
start_time = time.time()
train_X, test_X, train_y, test_y = get_data(filename = './data/poker-train.csv', targetname = "class")
# customized for this dataset (or any large dataset); must be chosen per the data, need to find some generic way
# for small datasets: batch size can be 1 (for more accuracy)
# for large ones: somewhere around 50-80; if taken as 1 it is very slow, so 50-80 trades accuracy for time
learning_rate = 0.01
training_epochs = 100
batch_size = 1
# Layer's sizes
x_size = train_X.shape[1] # Number of input nodes
h_size = train_X.shape[1] # Number of hidden nodes
y_size = train_y.shape[1] # Number of outcomes
# Symbols
X = tf.placeholder("float", shape=[None, x_size])
y = tf.placeholder("float", shape=[None, y_size])
# Weight initializations
weights = {
'w_1' : init_weights((x_size, h_size)),
'w_2' : init_weights((h_size, y_size))
}
# Bias initializations
biases = {
'b_1': init_weights([h_size]),
'b_2': init_weights([y_size])
}
# Forward propagation
yhat = forwardprop(X, weights, biases)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
updates = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Run SGD
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
total_batch = int(train_X.shape[0]/batch_size)
# Launch the graph
with tf.Session() as sess:
sess.run(init)
for epoch in range(training_epochs):
beg_i=0
# Loop over all batches
for i in range(total_batch):
end_i = beg_i + batch_size
if(end_i > train_X.shape[0]):
end_i = train_X.shape[0]
batch_x, batch_y = train_X[beg_i:end_i,:],train_y[beg_i:end_i,:]
beg_i = beg_i + batch_size
sess.run(updates, feed_dict={X: batch_x, y: batch_y})
train_accuracy = np.mean(np.argmax(train_y, axis=1) == sess.run(predict, feed_dict={X: train_X, y: train_y}))
test_accuracy = np.mean(np.argmax(test_y, axis=1) == sess.run(predict, feed_dict={X: test_X, y: test_y}))
print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
% (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))
# # Test model
# correct_prediction = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1))
# # Calculate accuracy
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# print( "Accuracy:", accuracy.eval({X: test_X, y: test_y}))
print("Total time of execution: ",time.time()-start_time)
if __name__ == '__main__':
main()
Output is:
Epoch = 1, train accuracy = 50.13%, test accuracy = 50.20%
Epoch = 100, train accuracy = 55.77%, test accuracy = 55.30%
I modified the above as follows:
batch_size = 50                      # was 1
training_epochs = int(train_X.shape[0]/batch_size)
# Layer's sizes
x_size = train_X.shape[1]   # Number of input nodes
h_size = 100                # Number of hidden nodes (was train_X.shape[1])
y_size = train_y.shape[1]   # Number of outcomes
With these changes:
Epoch = 1, train accuracy = 49.98%, test accuracy = 50.11%
Epoch = 500, train accuracy = 90.90%, test accuracy = 90.78%
In the code below l2 surprisingly returns the same value as l1, but since the optimizer is being requested in the list before l2, I expected the loss to be the new loss after training. Can I not request multiple values at the same time from the graph and expect consistent output?
import tensorflow as tf
import numpy as np
x = tf.placeholder(tf.float32, shape=[None, 10])
y = tf.placeholder(tf.float32, shape=[None, 2])
weight = tf.Variable(tf.random_uniform((10, 2), dtype=tf.float32))
loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.matmul(x, weight), y)
optimizer = tf.train.AdamOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
tf.initialize_all_variables().run()
X = np.random.rand(1, 10)
Y = np.array([[0, 1]])
# Evaluate loss before running training step
l1 = sess.run([loss], feed_dict={x: X, y: Y})[0][0][0]
print(l1) # 3.32393
# Running the training step
_, l2 = sess.run([optimizer, loss], feed_dict={x: X, y: Y})
print(l2[0][0]) # 3.32393 -- didn't change?
# Evaluate loss again after training step as sanity check
l3 = sess.run([loss], feed_dict={x: X, y: Y})[0][0][0]
print(l3) # 2.71041
No - the order in which you request them in the list has no effect on the evaluation order. For operations with side effects, such as the optimizer, if you want to guarantee a specific ordering you need to enforce it using with_dependencies or similar control-flow constructs. In general, ignoring side effects, TensorFlow will return results to you by grabbing the node from the graph as soon as it's computed - and, obviously, the loss is computed before the optimizer, since the optimizer requires the loss as one of its inputs. (Remember that 'loss' is not a variable; it's a tensor, so it's not actually affected by the optimizer step.)
sess.run([loss, optimizer], ...)
and
sess.run([optimizer, loss], ...)
are equivalent.
As Dave points out, the order of arguments to Session.run() has no effect on the order of evaluation, and the loss tensor in your example does not have a dependency on the optimizer op. To add a dependency, you could use tf.control_dependencies() to add an explicit dependency on the optimizer running before fetching the loss:
with tf.control_dependencies([optimizer]):
loss_after_optimizer = tf.identity(loss)
_, l2 = sess.run([optimizer, loss_after_optimizer], feed_dict={x: X, y: Y})
I've tested logistic regression implemented in TensorFlow with three ways of calling session.run:
all together
res1, res2, res3 = sess.run([op1, op2, op3])
separately
res1 = sess.run(op1)
res2 = sess.run(op2)
res3 = sess.run(op3)
with dependencies
with tf.control_dependencies([op1]):
op2_after = tf.identity(op1)
op3_after = tf.identity(op1)
res1,res2,res3 = session.run([op1, op2_after, op3_after])
With the batch size set to 10000, the result is:
1: 0.05+ secs < 2: 0.11+ secs < 3: 0.25+ secs
The main difference between 1 and 3 is only one mini-batch update, so it may not be worth using 3 instead of 1.
Here is the test code (it is an LR example written by someone else...).
Here is the data
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 2 13:38:14 2017
#author: inse7en
"""
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
import time
pickle_file = '/Users/inse7en/Downloads/notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# This is to expedite the process
train_subset = 10000
# This is a good beta value to start with
beta = 0.01
graph = tf.Graph()
with graph.as_default():
# Input data.
# They're all constants.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables
# They are variables we want to update and optimize.
weights = tf.Variable(tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
# Original loss function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Loss function using L2 Regularization
regularizer = tf.nn.l2_loss(weights)
loss = tf.reduce_mean(loss + beta * regularizer)
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 50
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
#_, l, predictions = session.run([optimizer, loss, train_prediction])
start_time = time.time()
with tf.control_dependencies([optimizer]):
loss_after_optimizer = tf.identity(loss)
predictions_after = tf.identity(train_prediction)
regularizers_after = tf.identity(regularizer)
_, l, predictions,regularizers = session.run([optimizer, loss_after_optimizer, predictions_after, regularizers_after])
print("--- with dependencies: %s seconds ---" % (time.time() - start_time))
#start_time = time.time()
#opt = session.run(optimizer)
#l = session.run(loss)
#predictions = session.run(train_prediction)
#regularizers = session.run(regularizer)
#print("--- run separately: %s seconds ---" % (time.time() - start_time))
#start_time = time.time()
#_, l, predictions,regularizers = session.run([optimizer, loss, train_prediction, regularizer])
#print("--- all together: %s seconds ---" % (time.time() - start_time))
#if (step % 100 == 0):
#print('Loss at step {}: {}'.format(step, l))
#print('Training accuracy: {:.1f}'.format(accuracy(predictions,
#train_labels[:train_subset, :])))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
# You don't have to do .eval above because we already ran the session for the
# train_prediction
#print('Validation accuracy: {:.1f}'.format(accuracy(valid_prediction.eval(),
#valid_labels)))
#print('Test accuracy: {:.1f}'.format(accuracy(test_prediction.eval(), test_labels)))
#print(regularizer)