Can't find the saved variables - tensorflow

The variable saver program is as follows:
import tensorflow as tf

sess = tf.InteractiveSession()

raw_data = [1., 2., 8., -1., 0., 5.5, 6., 13]
spikes = tf.Variable([False] * len(raw_data), name='spikes')
spikes.initializer.run()
tf.global_variables_initializer().run()  # add by cs

saver = tf.train.Saver()

for i in range(1, len(raw_data)):
    if raw_data[i] - raw_data[i-1] > 5:
        spikes_val = spikes.eval()
        spikes_val[i] = True
        updater = tf.assign(spikes, spikes_val)
        updater.eval()

save_path = saver.save(sess, "spikes.ckpt")
print("spikes data saved in file: %s" % save_path)
sess.close()
The restore program is as follows:
import tensorflow as tf

sess = tf.InteractiveSession()
spikes = tf.Variable([False] * 8, name='spikes')
saver = tf.train.Saver()

try:
    saver.restore(sess, 'spikes.ckpt')
    print(spikes.eval())
except:
    print('file not found')

sess.close()
It always ends up in the except branch and prints "file not found". What's wrong?
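Two things are worth checking (both are guesses, since the bare except hides the real traceback). First, drop the try/except so the actual error surfaces: "file not found" is printed for any exception, not only a missing checkpoint. Second, tf.train.Saver in TF 1.x is happier with an explicit relative path than with a bare filename. A minimal sketch of the restore side with both changes, assuming the save side also uses the "./spikes.ckpt" prefix:

import tensorflow as tf

sess = tf.InteractiveSession()
spikes = tf.Variable([False] * 8, name='spikes')
saver = tf.train.Saver()

# No try/except: let the real exception and its message surface.
# './spikes.ckpt' is an explicit relative path; save with the same
# prefix so both sides agree on where the checkpoint lives.
saver.restore(sess, './spikes.ckpt')
print(spikes.eval())
sess.close()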

Related

tf how to restore two variables from the same variable

I have saved a model, and now I am trying to restore it into two branches. I wrote the code below, and it raises ValueError: The same saveable will be restored with two names. How do I restore two variables from the same checkpoint variable?
restore_variables = {}
for varr in tf.global_variables():
    if varr.op.name in checkpoint_variables:
        restore_variables[varr.op.name.split("_red")[0]] = varr
        restore_variables[varr.op.name.split("_blue")[0]] = varr
init_saver = tf.train.Saver(restore_variables, max_to_keep=0)
Tested on TF 1.15
Basically, the error says that tf.train.Saver is finding multiple references to the same variable in the restore_variables dict. The fix is simple: create a copy of the variable with tf.Variable(varr) for one of the references, as follows.
I think it's safe to assume that you're not after multiple references to the same variable here, but rather two separate variables. (If you did want the same variable several times, you could simply reuse the one variable.)
with tf.Session() as sess:
    saver.restore(sess, './vars/vars.ckpt-0')
    restore_variables = {}
    checkpoint_variables = ['b']
    for varr in tf.global_variables():
        if varr.op.name in checkpoint_variables:
            restore_variables[varr.op.name.split("_red")[0]] = varr
            restore_variables[varr.op.name.split("_blue")[0]] = tf.Variable(varr)
    print(restore_variables)
    init_saver = tf.train.Saver(restore_variables, max_to_keep=0)
Below you can find the full code to replicate the issue with a toy example. Essentially, we have two variables a and b, and from b we create the b_red and b_blue restore names.
# Saving the variables
import tensorflow as tf
import numpy as np

a = tf.placeholder(shape=[None, 3], dtype=tf.float64)
w1 = tf.Variable(np.random.normal(size=[3, 2]), name='a')
out = tf.matmul(a, w1)
w2 = tf.Variable(np.random.normal(size=[2, 3]), name='b')
out = tf.matmul(out, w2)
saver = tf.train.Saver([w1, w2])
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    saved_path = saver.save(sess, './vars/vars.ckpt', global_step=0)

# Restoring the variables
with tf.Session() as sess:
    saver.restore(sess, './vars/vars.ckpt-0')
    restore_variables = {}
    checkpoint_variables = ['b']
    for varr in tf.global_variables():
        if varr.op.name in checkpoint_variables:
            restore_variables[varr.op.name + "_red"] = varr
            # Fixing the issue: instead of varr, do tf.Variable(varr)
            restore_variables[varr.op.name + "_blue"] = varr
    print(restore_variables)
    init_saver = tf.train.Saver(restore_variables, max_to_keep=0)
I may not be understanding the problem correctly, but can't you just make two saver objects? Something like this:
import tensorflow as tf

# Make checkpoint
with tf.Graph().as_default(), tf.Session() as sess:
    a = tf.Variable([1., 2.], name='a')
    sess.run(a.initializer)
    b = tf.Variable([3., 4., 5.], name='b')
    sess.run(b.initializer)
    saver = tf.train.Saver([a, b])
    saver.save(sess, 'tmp/vars.ckpt')

# Restore checkpoint
with tf.Graph().as_default(), tf.Session() as sess:
    # Red
    a_red = tf.Variable([0., 0.], name='a_red')
    b_red = tf.Variable([0., 0., 0.], name='b_red')
    saver_red = tf.train.Saver({'a': a_red, 'b': b_red})
    saver_red.restore(sess, 'tmp/vars.ckpt')
    print(a_red.eval())
    # [1. 2.]
    print(b_red.eval())
    # [3. 4. 5.]

    # Blue
    a_blue = tf.Variable([0., 0.], name='a_blue')
    b_blue = tf.Variable([0., 0., 0.], name='b_blue')
    saver_blue = tf.train.Saver({'a': a_blue, 'b': b_blue})
    saver_blue.restore(sess, 'tmp/vars.ckpt')
    print(a_blue.eval())
    # [1. 2.]
    print(b_blue.eval())
    # [3. 4. 5.]
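Note on why this works: when tf.train.Saver is given a dict, the keys are the names as stored in the checkpoint ('a', 'b') and the values are the graph variables to fill, so the same checkpoint entry can be restored into differently named variables simply by building two savers.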

tensorflow simple estimator input function problems

I am trying to create a simple input function with the feature data being the numbers 1-10 and the labels being 0 when x < 5, 5 when x = 5, and 10 when x > 5.
example:
# data
nmbrs  = [10., 1., 2., 3., 4., 5., 6.,  7.,  8.,  9.]
labels = [10., 0., 0., 0., 0., 5., 10., 10., 10., 10.]

# input function
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'numbers': np.array(nmbrs)}, y=np.array(labels),
    batch_size=batch_size, num_epochs=None, shuffle=True)
The problem I am having is that the nmbrs and labels arrays don't seem to be in the right form. I tried making them into 2-D arrays, but that didn't work either. I'm sure I'm doing something really easy wrong here...
EDIT: model and neural net functions
def neural_net(x_dict):
    # TF Estimator input is a dict, in case of multiple inputs
    x = x_dict['numbers']
    # Hidden fully connected layer with 128 neurons
    layer_1 = tf.layers.dense(x, n_hidden_1)
    # Hidden fully connected layer with 128 neurons
    layer_2 = tf.layers.dense(layer_1, n_hidden_2)
    # Output fully connected layer with a neuron for each class
    out_layer = tf.layers.dense(layer_2, num_classes)
    return out_layer

# Define the model function (following TF Estimator Template)
def model_fn(features, labels, mode):
    # Build the neural network
    logits = neural_net(features)

    # Predictions
    pred_classes = tf.argmax(logits, axis=1)
    pred_probas = tf.nn.softmax(logits)

    # If prediction mode, early return
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

    # Define loss and optimizer
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())
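Two issues stand out in this setup (my reading of the code, not a confirmed fix): tf.layers.dense expects an input with a feature dimension, so a 1-D batch of scalars needs reshaping to [-1, 1], and tf.nn.sparse_softmax_cross_entropy_with_logits expects class indices in [0, num_classes), not the raw label values 0/5/10. A sketch of both fixes, with the layer widths and batch size as placeholders:

import numpy as np
import tensorflow as tf

# Map the raw label values 0/5/10 to class indices 0/1/2 so that
# sparse_softmax_cross_entropy_with_logits (with 3 classes) accepts them.
raw_labels = [10., 0., 0., 0., 0., 5., 10., 10., 10., 10.]
class_ids = np.array([{0.: 0, 5.: 1, 10.: 2}[v] for v in raw_labels], dtype=np.int32)
nmbrs = np.array([10., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=np.float32)

input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'numbers': nmbrs}, y=class_ids,
    batch_size=4, num_epochs=None, shuffle=True)

def neural_net(x_dict):
    # Give each scalar sample an explicit feature axis: [batch] -> [batch, 1]
    x = tf.reshape(x_dict['numbers'], [-1, 1])
    layer_1 = tf.layers.dense(x, 128)
    layer_2 = tf.layers.dense(layer_1, 128)
    return tf.layers.dense(layer_2, 3)  # one logit per class: {0, 5, 10}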

tensorflow lite toco python API: NameError: "name 'tempfile' is not defined"

I ran the example from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/toco/g3doc/python_api.md
import tensorflow as tf

img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
out = tf.identity(val, name="out")
with tf.Session() as sess:
    tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out])
    open("test.tflite", "wb").write(tflite_model)
but I got the error "NameError: name 'tempfile' is not defined" in Python 3 and "NameError: global name 'tempfile' is not defined" in Python 2:
NameError Traceback (most recent call last)
<ipython-input-21-24c36564faa4> in <module>()
5 out = tf.identity(val, name="out")
6 with tf.Session() as sess:
----> 7 tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out])
8 open("test.tflite", "wb").write(tflite_modeL)
python3.5/site-packages/tensorflow/contrib/lite/python/lite.py in toco_convert(input_data, input_tensors, output_tensors, inference_type, input_format, output_format, quantized_input_stats, drop_control_dependency)
196 data = toco_convert_protos(model.SerializeToString(),
197 toco.SerializeToString(),
--> 198 input_data.SerializeToString())
199 return data
200
python3.5/site-packages/tensorflow/contrib/lite/python/lite.py in toco_convert_protos(model_flags_str, toco_flags_str, input_data_str)
89 return _toco_convert_protos(model_flags_str, toco_flags_str, input_data_str)
90
---> 91 with tempfile.NamedTemporaryFile() as fp_toco, \
92 tempfile.NamedTemporaryFile() as fp_model, \
93 tempfile.NamedTemporaryFile() as fp_input, \
NameError: name 'tempfile' is not defined
How can I make it work?
This issue has been fixed since TensorFlow 1.7 (see github bug).
I just successfully executed your snippet in the latest TensorFlow version 1.9:
import tensorflow as tf

img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
out = tf.identity(val, name="out")
with tf.Session() as sess:
    tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out])
    open("converteds_model.tflite", "wb").write(tflite_model)
The TFLite model converteds_model.tflite gets saved to file.
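For later 1.x releases, note that toco_convert was deprecated in favor of the converter class; a sketch of the equivalent call, assuming TF 1.13+ where it is exposed as tf.lite.TFLiteConverter:

import tensorflow as tf

img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
out = tf.identity(val, name="out")
with tf.Session() as sess:
    # from_session wraps the graph_def/tensor plumbing that toco_convert took directly
    converter = tf.lite.TFLiteConverter.from_session(sess, [img], [out])
    tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)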

tflearn to_categorical type error

I keep getting a TypeError when I try to use to_categorical from tflearn. The output error is:
trainY = to_categorical(y = trainY, nb_classes=2)
File "C:\Users\saleh\Anaconda3\lib\site-packages\tflearn\data_utils.py", line 46, in to_categorical
return (y[:, None] == np.unique(y)).astype(np.float32)
TypeError: list indices must be integers or slices, not tuple
This is the reproducible code that I am trying to run:
import tflearn
from tflearn.data_utils import to_categorical
from tflearn.datasets import imdb
#IMDB dataset loading
train, test, _ = imdb.load_data(path = 'imdb.pkl', n_words = 10000, valid_portion = 0.1)
trainX, trainY = train
testX, testY = test
#converting labels to binary vectors
trainY = to_categorical(y = trainY, nb_classes=2) # **This is where I get the error**
testY = to_categorical(y = testY, nb_classes=2)
Cannot reproduce your error:
import tflearn
from tflearn.data_utils import to_categorical
from tflearn.datasets import imdb
train, test, _ = imdb.load_data(path = 'imdb.pkl', n_words = 10000, valid_portion = 0.1)
trainX, trainY = train
testX, testY = test
trainY[0:5]
# [0, 0, 0, 1, 0]
trainY = to_categorical(y = trainY, nb_classes=2)
trainY[0:5]
# array([[ 1., 0.],
# [ 1., 0.],
# [ 1., 0.],
# [ 0., 1.],
# [ 1., 0.]])
System configuration:
Python 2.7.12
Tensorflow 1.3.0
TFLearn 0.3.2
Ubuntu 16.04
UPDATE: It seems that some recent TFLearn commit has broken to_categorical - see here and here. I suggest uninstalling your current version and installing the latest stable one with pip install tflearn (this is actually what I have done myself above).
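If reinstalling is not an option, a workaround suggested by the traceback (an assumption on my part: y[:, None] fails because trainY is a plain Python list) is to convert the labels to a NumPy array before the call:

import numpy as np
from tflearn.data_utils import to_categorical
from tflearn.datasets import imdb

train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000, valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# y[:, None] only works on ndarrays, so convert the label lists first
trainY = to_categorical(np.asarray(trainY), nb_classes=2)
testY = to_categorical(np.asarray(testY), nb_classes=2)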

tensorflow NameError: name 'eval_input_fn' is not defined

I am following tensorflow's getting started. I downloaded and installed Anaconda today. When run, the program below produces:
File "p3.py", line 35, in <module>
eval_loss = estimator.evaluate(input_fn=eval_input_fn)
NameError: name 'eval_input_fn' is not defined
import numpy as np
import tensorflow as tf

# Declare list of features, we only have one real-valued feature
def model(features, labels, mode):
    # Build a linear model and predict values
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W * features['x'] + b
    # Loss sub-graph
    loss = tf.reduce_sum(tf.square(y - labels))
    # Training sub-graph
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = tf.group(optimizer.minimize(loss),
                     tf.assign_add(global_step, 1))
    # ModelFnOps connects subgraphs we built to the
    # appropriate functionality.
    return tf.contrib.learn.ModelFnOps(
        mode=mode, predictions=y,
        loss=loss,
        train_op=train)

estimator = tf.contrib.learn.Estimator(model_fn=model)

# define our data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])

input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x_train}, y_train, 4, num_epochs=1000)

# train
estimator.fit(input_fn=input_fn, steps=1000)

# Here we evaluate how well our model did.
train_loss = estimator.evaluate(input_fn=input_fn)
eval_loss = estimator.evaluate(input_fn=eval_input_fn)  # line 35
print("train loss: %r" % train_loss)
print("eval loss: %r" % eval_loss)
For some reason you haven't copy-pasted the definition of the evaluation input function; you can find it directly in the tutorial you linked. Here's the missing line:
eval_input_fn = tf.contrib.learn.io.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000)