Cannot load int variable from previous session in TensorFlow 1.1

I have read many similar questions and just cannot get this to work properly.
My model is training well and checkpoint files are being made every epoch. I want the program to be able to continue from epoch x once reloaded, and also to print which epoch it is on with every iteration. I could simply save that data outside of the checkpoint file, but I also wanted to do it this way to give me confidence that everything else is being stored properly.
Unfortunately the value in the epoch/global_step variable is always still 0 when I restart.
import tensorflow as tf
import numpy as np
# more imports

def extract_number(f):  # used to get the latest checkpoint file
    s = re.findall("epoch(\d+).ckpt", f)
    return (int(s[0]) if s else -1, f)

def restore(init_op, sess, saver):  # called to restore or just initialise the model
    list = glob(os.path.join("./params/e*"))
    if list:
        file = max(list, key=extract_number)
        saver.restore(sess, file[:-5])
    sess.run(init_op)
    return

with tf.Graph().as_default() as g:
    # build models

    total_batch = data.train.num_examples / batch_size
    epochLimit = 51

    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        saver = tf.train.Saver()
        init_op = tf.global_variables_initializer()
        restore(init_op, sess, saver)

        epoch = global_step.eval()

        while epoch < epochLimit:
            total_batch = data.train.num_examples / batch_size

            for i in range(int(total_batch)):
                sys.stdout.flush()
                voxels = newData.eval()
                batch_z = np.random.uniform(-1, 1, [batch_size, z_size]).astype(np.float32)

                sess.run(opt_G, feed_dict={z: batch_z, train: True})
                sess.run(opt_D, feed_dict={input: voxels, z: batch_z, train: True})

                with open("out/loss.csv", 'a') as f:
                    batch_loss_G = sess.run(loss_G, feed_dict={z: batch_z, train: False})
                    batch_loss_D = sess.run(loss_D, feed_dict={input: voxels, z: batch_z, train: False})
                    msgOut = "Epoch: [{0}], i: [{1}], G_Loss[{2:.8f}], D_Loss[{3:.8f}]".format(epoch, i, batch_loss_G, batch_loss_D)
                    print(msgOut)

            epoch = epoch + 1
            sess.run(global_step.assign(epoch))
            saver.save(sess, "params/epoch{0}.ckpt".format(epoch))

            batch_z = np.random.uniform(-1, 1, [batch_size, z_size]).astype(np.float32)
            voxels = sess.run(x_, feed_dict={z: batch_z})
            v = voxels[0].reshape([32, 32, 32]) > 0
            util.save_binvox(v, "out/epoch{0}.vox".format(epoch), 32)
I also update the global_step variable using assign at the bottom. Any ideas? Any help would be greatly appreciated.

When you call sess.run(init_op) after restoring, it resets all variables to their initial values. Comment that line out and things should work.
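For reference, a minimal sketch of a restore helper that only runs the initializer when there is nothing to restore. This uses tf.train.latest_checkpoint instead of the glob-based lookup in the question; the directory name is illustrative:

import tensorflow as tf

def restore_or_init(sess, saver, init_op, ckpt_dir="./params"):
    # latest_checkpoint reads the "checkpoint" index file that saver.save()
    # maintains and returns the newest checkpoint prefix, or None.
    ckpt = tf.train.latest_checkpoint(ckpt_dir)
    if ckpt is not None:
        saver.restore(sess, ckpt)   # all variables, including global_step, come from the checkpoint
    else:
        sess.run(init_op)           # only initialise when no checkpoint exists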

My original code was wrong for several reasons, because I was trying so many things at once. The first responder, Alexandre Passos, makes a valid point, but I believe what also changed the game was the use of variable scopes (maybe?).
Below is the working updated code if it helps anyone:
import tensorflow as tf
import numpy as np
# more imports

def extract_number(f):  # used to get the latest checkpoint file
    s = re.findall("epoch(\d+).ckpt", f)
    return (int(s[0]) if s else -1, f)

def restore(sess, saver):  # called to restore or just initialise the model
    list = glob(os.path.join("./params/e*"))
    if list:
        file = max(list, key=extract_number)
        saver.restore(sess, file[:-5])
        return saver, True, sess
    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    return saver, False, sess

batch_size = 100
learning_rate = 0.0001
beta1 = 0.5
z_size = 100
save_interval = 1

data = dataset.read()
total_batch = data.train.num_examples / batch_size

def fill_queue():
    for i in range(int(total_batch * epochLimit)):
        sess.run(enqueue_op, feed_dict={batch: data.train.next_batch(batch_size)})  # running in a separate thread to feed a FIFOQueue

with tf.variable_scope("glob"):
    global_step = tf.get_variable(name='global_step', initializer=0, trainable=False)

# build models

epochLimit = 51
saver = tf.train.Saver()

with tf.Session() as sess:
    saver, rstr, sess = restore(sess, saver)

    with tf.variable_scope("glob", reuse=True):
        epocht = tf.get_variable(name='global_step', trainable=False, dtype=tf.int32)
        epoch = epocht.eval()

    while epoch < epochLimit:
        total_batch = data.train.num_examples / batch_size

        for i in range(int(total_batch)):
            sys.stdout.flush()
            voxels = newData.eval()
            batch_z = np.random.uniform(-1, 1, [batch_size, z_size]).astype(np.float32)

            sess.run(opt_G, feed_dict={z: batch_z, train: True})
            sess.run(opt_D, feed_dict={input: voxels, z: batch_z, train: True})

            with open("out/loss.csv", 'a') as f:
                batch_loss_G = sess.run(loss_G, feed_dict={z: batch_z, train: False})
                batch_loss_D = sess.run(loss_D, feed_dict={input: voxels, z: batch_z, train: False})
                msgOut = "Epoch: [{0}], i: [{1}], G_Loss[{2:.8f}], D_Loss[{3:.8f}]".format(epoch, i, batch_loss_G, batch_loss_D)
                print(msgOut)

        epoch = epoch + 1
        sess.run(global_step.assign(epoch))
        saver.save(sess, "params/epoch{0}.ckpt".format(epoch))

        batch_z = np.random.uniform(-1, 1, [batch_size, z_size]).astype(np.float32)
        voxels = sess.run(x_, feed_dict={z: batch_z})
        v = voxels[0].reshape([32, 32, 32]) > 0
        util.save_binvox(v, "out/epoch{0}.vox".format(epoch), 32)
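Reduced to a minimal sketch, the pattern that makes this work (assuming TF 1.x; the directory and epoch limit are illustrative) is to keep the epoch counter in a non-trainable variable so the Saver writes it to, and restores it from, the checkpoint along with everything else:

import os
import tensorflow as tf

ckpt_dir = './params'
os.makedirs(ckpt_dir, exist_ok=True)

global_step = tf.get_variable('global_step', initializer=0, trainable=False)
saver = tf.train.Saver()

with tf.Session() as sess:
    ckpt = tf.train.latest_checkpoint(ckpt_dir)
    if ckpt:
        saver.restore(sess, ckpt)                     # epoch resumes from the saved value
    else:
        sess.run(tf.global_variables_initializer())

    epoch = sess.run(global_step)
    while epoch < 51:
        # ... run the training ops for one epoch here ...
        epoch += 1
        sess.run(global_step.assign(epoch))
        saver.save(sess, os.path.join(ckpt_dir, 'epoch{0}.ckpt'.format(epoch)))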

Related

'numpy.dtype' object has no attribute 'base_dtype'

I am new to TensorFlow, so this is how my code unfolds!
import tensorflow as tf
import tensorflow.contrib.learn as learn
mnist = learn.datasets.mnist.read_data_sets('MNIST-data', one_hot=True)
import numpy as np

M = tf.Variable(tf.zeros([784, 10]))
B = tf.Variable(tf.zeros([10]))
image_holder = tf.placeholder(tf.float32, [None, 784])
label_holder = tf.placeholder(tf.float32, [None, 10])

predicted_value = tf.add(tf.matmul(image_holder, M), B)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predicted_value, labels=label_holder))

learning_rate = 0.01
num_epochs = 1000
batch_size = 100
num_batches = int(mnist.train.num_examples / batch_size)

init = tf.global_variables_initializer()
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess:
    sess.run(init)
    for _ in range(num_epochs):
        for each_batch in range(num_batches):
            current_image, current_image_label = mnist.train.next_batch(batch_size)
            optimizer_value, loss = sess.run([optimizer, loss], feed_dict={image_holder: current_image, label_holder: current_image_label})
            print("The loss value is {} \n".format(loss))
But the problem I am getting is this strange error:
'numpy.dtype' object has no attribute 'base_dtype'
I do not know what is wrong with the code, which I think is absolutely correct. Any help regarding this issue?
First of all, some comments:
- Initialization of variables should always go at the end of graph construction.
- The optimizer and the train op should be separated; not strictly necessary, but it is good practice.
- Also, when you run sess.run(variable), be sure not to assign the result to the same name. That is, make sure you are not doing variable = sess.run(variable), because that overwrites the graph node with its value.
The error here was the last one. So the code, once working, could be something like:
M = tf.Variable(tf.zeros([784, 10]), dtype=tf.float32)
B = tf.Variable(tf.zeros([10]), dtype=tf.float32)
image_holder = tf.placeholder(tf.float32, [None, 784])
label_holder = tf.placeholder(tf.float32, [None, 10])

predicted_value = tf.add(tf.matmul(image_holder, M), B)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predicted_value, labels=label_holder))

learning_rate = 0.01
num_epochs = 1000
batch_size = 100
num_batches = int(mnist.train.num_examples / batch_size)

optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for _ in range(num_epochs):
        for each_batch in range(num_batches):
            current_image, current_image_label = mnist.train.next_batch(batch_size)
            optimizer_value, loss_value = sess.run([train_op, loss], feed_dict={image_holder: current_image, label_holder: current_image_label})
            print("The loss value is {} \n".format(loss_value))
Hope this has helped
More explicitly, you overwrote your loss node with its numpy value the first time you ran sess.run([optimizer, loss]) and assigned the result back to loss. So on the second iteration of the for loop, the session sees a numpy value in place of the original loss op.
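For illustration, a tiny self-contained version of the problematic pattern versus the safe one (the constant just stands in for the real loss op):

import tensorflow as tf

loss = tf.constant(1.0)            # stands in for the real loss op

with tf.Session() as sess:
    # Safe: the op keeps its name, the evaluated value gets its own.
    loss_value = sess.run(loss)

    # Problematic: after this line `loss` is a numpy float, so a second
    # sess.run(loss) inside a loop fails, because the session is handed
    # a numpy value instead of a graph element.
    loss = sess.run(loss)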

When restoring a network, an operation cannot be found in the restored graph

Using TensorFlow 1.9, I want to train a neural network in one Python file, and then restore the network using a different Python file. I have tried to do this using a simple example, but when I try to load my "prediction" operation, I receive an error. Specifically, the error is: KeyError: "The name 'prediction' refers to an Operation not in the graph.".
Below is my Python file to train and save the network. It generates some example data and trains a simple neural network, then saves the network every epoch.
import numpy as np
import tensorflow as tf

input_data = np.zeros([100, 10])
label_data = np.zeros([100, 1])
for i in range(100):
    for j in range(10):
        input_data[i, j] = i * j / 1000
    label_data[i] = 2 * input_data[i, 0] + np.random.uniform(0.01)

input_placeholder = tf.placeholder(tf.float32, shape=[None, 10], name='input_placeholder')
label_placeholder = tf.placeholder(tf.float32, shape=[None, 1], name='label_placeholder')

x = tf.layers.dense(inputs=input_placeholder, units=10, activation=tf.nn.relu)
x = tf.layers.dense(inputs=x, units=10, activation=tf.nn.relu)
prediction = tf.layers.dense(inputs=x, units=1, name='prediction')

loss_op = tf.reduce_mean(tf.square(prediction - label_placeholder))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss_op)

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_num in range(100):
        _, loss = sess.run([train_op, loss_op], feed_dict={input_placeholder: input_data, label_placeholder: label_data})
        print('epoch ' + str(epoch_num) + ', loss = ' + str(loss))
        saver.save(sess, '../Models/model', global_step=epoch_num + 1)
And below is my Python file to restore the network. It loads the input and output placeholders, together with the operation required for making predictions. However, even though I have named an operation as prediction in the training code above, the code below cannot seem to find this operation in the loaded graph.
import tensorflow as tf
import numpy as np

input_data = np.zeros([100, 10])
for i in range(100):
    for j in range(10):
        input_data[i, j] = i * j / 1000

with tf.Session() as sess:
    saver = tf.train.import_meta_graph('../Models/model-99.meta')
    saver.restore(sess, '../Models/model-99')
    graph = tf.get_default_graph()
    input_placeholder = graph.get_tensor_by_name('input_placeholder:0')
    label_placeholder = graph.get_tensor_by_name('label_placeholder:0')
    prediction = graph.get_operation_by_name('prediction')
    pred = sess.run([prediction], feed_dict={input_placeholder: input_data})
Why can this code not find this operation, and what should I do to correct my code?
You have to modify a single line in your loading script (tested with tf 1.8):
prediction = graph.get_tensor_by_name('prediction/BiasAdd:0')
You have to specify which tensor you want to access, as prediction is only the name scope of the dense layer. You can check the exact tensor name during saving with prediction.name. And when restoring, use graph.get_tensor_by_name, as you are interested in the value, not the operation producing it.
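As a rough sketch of both options on the training side (the tf.identity alias is just one possible way to get a stable, predictable name; it is not something the original code does):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 10], name='input_placeholder')
prediction = tf.layers.dense(inputs=x, units=1, name='prediction')
print(prediction.name)  # prints the actual tensor name, e.g. 'prediction/BiasAdd:0'

# Alternatively, give the output an explicit name before saving ...
output = tf.identity(prediction, name='prediction_output')
# ... and in the loading script, after import_meta_graph/restore:
# prediction = graph.get_tensor_by_name('prediction_output:0')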

how to restore variables in fully_connected function

In my training file (train.py), I write:
def deep_part(self):
    with tf.variable_scope("deep-part"):
        y_deep = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.factor_size])  # None * (F*K)
        # self.deep_layers = 2
        for i in range(0, len(self.deep_layers)):
            y_deep = tf.contrib.layers.fully_connected(y_deep, self.deep_layers[i],
                                                       activation_fn=self.deep_layers_activation, scope='fc%d' % i)
        return y_deep
Now, in my prediction file (predict.py), I restore the checkpoint, but I don't know how to reload the "deep-part" network's weights and biases, because I think the fully_connected function might hide them.
I wrote a lengthy explanation here. A short summary:
With saver.save(sess, '/tmp/my_model'), TensorFlow produces multiple files:
checkpoint
my_model.data-00000-of-00001
my_model.index
my_model.meta
The checkpoint file is just a pointer to the latest version of the model weights; it is simply a plain-text file containing
$ !cat /tmp/model/checkpoint
model_checkpoint_path: "/tmp/my_model"
all_model_checkpoint_paths: "/tmp/my_model"
The others are binary files containing the graph (.meta) and weights (.data*).
You can help yourself by running
import tensorflow as tf
import numpy as np

data = np.arange(9 * 1).reshape(1, 9).astype(np.float32)
plhdr = tf.placeholder(tf.float32, shape=[1, 9], name='input')
print(plhdr.name)
activation = tf.layers.dense(plhdr, 10, name='fc')
print(activation.name)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    expected = sess.run(activation, {plhdr: data})
    print(expected)

    saver = tf.train.Saver(tf.global_variables())
    saver.save(sess, '/tmp/my_model')

tf.reset_default_graph()

with tf.Session() as sess:
    # load the computation graph (the fully connected layer + placeholder)
    loader = tf.train.import_meta_graph('/tmp/my_model.meta')
    sess.run(tf.global_variables_initializer())

    plhdr = tf.get_default_graph().get_tensor_by_name('input:0')
    activation = tf.get_default_graph().get_tensor_by_name('fc/BiasAdd:0')

    actual = sess.run(activation, {plhdr: data})
    assert np.allclose(actual, expected) is False

    # now load the weights
    loader = loader.restore(sess, '/tmp/my_model')
    actual = sess.run(activation, {plhdr: data})
    assert np.allclose(actual, expected) is True
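In the asker's case, tf.contrib.layers.fully_connected creates its variables under the enclosing scopes, so after restoring, the weights should live under names like 'deep-part/fc0/weights:0'. If in doubt, one way to see what a checkpoint actually contains is to list its variables; a small sketch, assuming the '/tmp/my_model' checkpoint from above and a reasonably recent TF 1.x release:

import tensorflow as tf

# Print every variable name and shape stored in the checkpoint.
for name, shape in tf.train.list_variables('/tmp/my_model'):
    print(name, shape)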

why am I getting a 100% error rate (RNN for spam)

I am learning TensorFlow by modifying some examples I've found. To start off, I have taken an RNN example to try against the "Spam" data set from UCI.
My code and the sample data set can be found in full here:
https://trinket.io/python/c7d6b95452
When I run the code I get a 100% error rate. I figure that even if this data set were not well suited to this particular model, I'd get at least something better than that, so I don't think it's my choice of sample data set.
Below is my Python code. If anyone can suggest how to modify this to get the model to work properly, I would appreciate it! I'd also appreciate any general TensorFlow advice.
# Example for my blog post at:
# https://danijar.com/introduction-to-recurrent-networks-in-tensorflow/
import functools
import os
import sets
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell

def lazy_property(function):
    attribute = '_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)
    return wrapper

class SequenceClassification:

    def __init__(self, data, target, dropout, num_hidden=200, num_layers=3):
        self.data = data
        self.target = target
        self.dropout = dropout
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        self.prediction
        self.error
        self.optimize

    @lazy_property
    def prediction(self):
        # Recurrent network.
        network = rnn_cell.GRUCell(self._num_hidden)
        network = rnn_cell.DropoutWrapper(
            network, output_keep_prob=self.dropout)
        network = rnn_cell.MultiRNNCell([network] * self._num_layers)
        output, _ = tf.nn.dynamic_rnn(network, self.data, dtype=tf.float32)
        # Select last output.
        output = tf.transpose(output, [1, 0, 2])
        last = tf.gather(output, int(output.get_shape()[0]) - 1)
        # Softmax layer.
        weight, bias = self._weight_and_bias(
            self._num_hidden, int(self.target.get_shape()[1]))
        prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
        return prediction

    @lazy_property
    def cost(self):
        cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
        return cross_entropy

    @lazy_property
    def optimize(self):
        learning_rate = 0.003
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        return optimizer.minimize(self.cost)

    @lazy_property
    def error(self):
        mistakes = tf.not_equal(
            tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
        return tf.reduce_mean(tf.cast(mistakes, tf.float32))

    @staticmethod
    def _weight_and_bias(in_size, out_size):
        weight = tf.truncated_normal([in_size, out_size], stddev=0.01)
        bias = tf.constant(0.1, shape=[out_size])
        return tf.Variable(weight), tf.Variable(bias)

def main():
    sample_size = 10
    num_classes = 2  # spam or ham

    ##
    # import spam data
    ##
    spam_data = []
    spam_data_train = []
    spam_data_test = []
    data_dir = "."
    data_file = "spam.csv"
    with open(os.path.join(data_dir, data_file), "r") as file_handle:
        for row in file_handle:
            spam_data.append(row)
    spam_data = [line.rstrip().split(",") for line in spam_data if len(line) >= 1]
    random.shuffle(spam_data)
    spam_data_train = spam_data[0:int(len(spam_data)*.8)]
    spam_data_test = spam_data[int(len(spam_data)*.8):int(len(spam_data))]

    def next_train_batch(batch_size):
        a = random.sample(spam_data_train, batch_size)
        return [np.array([line[:-1] for line in a]), np.array([line[len(line)-1] for line in a])]

    def train_batch():
        return [np.array([line[:-1] for line in spam_data_train]), np.array([line[len(line)-1] for line in spam_data_train])]

    def next_test_batch(batch_size):
        a = random.sample(spam_data_test, batch_size)
        return [np.array([line[:-1] for line in a]), np.array([line[len(line)-1] for line in a])]

    def test_batch():
        return [np.array([line[:-1] for line in spam_data_test]), np.array([line[len(line)-1] for line in spam_data_test])]

    t = train_batch()
    train_input = t[0]
    train_target = t[1]

    test = test_batch()
    test_input = t[0]
    test_target = t[1]

    training_data = tf.placeholder(tf.float32, [None, sample_size, len(train_input[0])], "training_data")
    training_target = tf.placeholder(tf.float32, [None, sample_size], "training_target")

    testing_data = tf.placeholder(tf.float32, [None, len(test_input), len(test_input[0])], "testing_data")
    testing_target = tf.placeholder(tf.float32, [None, len(test_target)], "testing_target")

    dropout = tf.placeholder(tf.float32)

    training_model = SequenceClassification(training_data, training_target, dropout)
    tf.get_variable_scope().reuse_variables()
    testing_model = SequenceClassification(testing_data, testing_target, dropout)

    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)

    for epoch in range(sample_size):
        for _ in range(100):
            sample = random.sample(range(0, len(train_input)-1), sample_size)
            batch_train = [train_input[i] for i in sample]
            batch_target = [train_target[i] for i in sample]
            sess.run(training_model.optimize, {
                training_data: [batch_train], training_target: [batch_target], dropout: 0.5})
        error = sess.run(testing_model.error, {
            testing_data: [test_input], testing_target: [test_target], dropout: 1.0})
        print('Epoch {:2d} error {:3.1f}%'.format(epoch + 1, 100 * error))

if __name__ == '__main__':
    main()

Tensorflow Model not Restoring and Running Properly

I have built and successfully trained a convolutional model, however I cannot manage to restore the model and run my evaluation on it. The program throws a bunch of errors without giving an answer.
My code for the evaluation is here:
import tensorflow as tf
import main
import Process
import Input

eval_dir = "/Users/Zanhuang/Desktop/NNP/model.ckpt-98"
checkpoint_dir = "/Users/Zanhuang/Desktop/NNP/checkpoint"

def evaluate():
    with tf.Graph().as_default() as g:
        images, labels = Process.eval_inputs()
        forward_propgation_results = Process.forward_propagation(images)
        init_op = tf.initialize_all_variables()
        saver = tf.train.Saver()
        top_k_op = tf.nn.in_top_k(forward_propgation_results, labels, 1)

        with tf.Session(graph=g) as sess:
            sess.run(init_op)
            tf.train.start_queue_runners(sess=sess)
            saver.restore(sess, eval_dir)

            for i in range(100):
                print(sess.run(top_k_op))

def main(argv=None):
    evaluate()

if __name__ == '__main__':
    tf.app.run()
My output from the eval looks like this, but the program never actually runs; it just stays stuck there.
E tensorflow/core/client/tensor_c_api.cc:485] /Users/Zanhuang/Desktop/NNP/Prostate_Cancer_Data1.bin
     [[Node: ReaderRead = ReaderRead[_class=["loc:@FixedLengthRecordReader", "loc:@input_producer"], _device="/job:localhost/replica:0/task:0/cpu:0"](FixedLengthRecordReader, input_producer)]]
ERROR:tensorflow:Exception in QueueRunner: /Users/Zanhuang/Desktop/NNP/Prostate_Cancer_Data1.bin
     [[Node: ReaderRead = ReaderRead[_class=["loc:@FixedLengthRecordReader", "loc:@input_producer"], _device="/job:localhost/replica:0/task:0/cpu:0"](FixedLengthRecordReader, input_producer)]]
The following is the main part of my program where I save the model and the checkpoint files.
import Input
import Process
import time
import numpy as np
import tensorflow as tf
from datetime import datetime

FLAGS = tf.app.flags.FLAGS

def train():
    with tf.Session() as sess:
        images, labels = Process.inputs()
        forward_propgation_results = Process.forward_propagation(images)
        train_loss, cost = Process.error(forward_propgation_results, labels)

        image_summary_t = tf.image_summary(images.name, images, max_images=2)
        summary_op = tf.merge_all_summaries()

        init = tf.initialize_all_variables()
        saver = tf.train.Saver()
        sess.run(init)
        saver = tf.train.Saver(tf.all_variables())

        tf.train.start_queue_runners(sess=sess)

        train_dir = "/Users/Zanhuang/Desktop/NNP/model.ckpt"
        summary_writer = tf.train.SummaryWriter(train_dir, sess.graph)

        for step in range(100):
            start_time = time.time()
            print(sess.run([train_loss, cost]))
            duration = time.time() - start_time

            if step % 1 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = ('%s: step %d, (%.1f examples/sec; %.3f ''sec/batch)')
                print(format_str % (datetime.now(), step, examples_per_sec, sec_per_batch))

                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            if step % 2 == 0:
                checkpoint_path = train_dir
                saver.save(sess, checkpoint_path, global_step=step)

def main(argv=None):
    train()

if __name__ == '__main__':
    tf.app.run()
