I have trained many sub-models, each of which is part of a final model, and I want to use those pretrained sub-models to initialize the final model's parameters. I tried to use a SessionRunHook to load parameters from the other checkpoint files to initialize the final model's parameters.
I tried the following code but it failed. Any advice would be appreciated. Thanks!
The error info is:
Traceback (most recent call last):
File "train_high_api_local.py", line 282, in <module>
tf.app.run()
File "/Users/zhouliaoming/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 124, in run
_sys.exit(main(argv))
File "train_high_api_local.py", line 266, in main
clf_.train(input_fn=lambda: read_file([tables[0]], epochs_per_eval), steps=None, hooks=[hook_test]) # input yield: x, y
File "/Users/zhouliaoming/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 314, in train
.......
File "/Users/zhouliaoming/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py", line 674, in create_session
hook.after_create_session(self.tf_sess, self.coord)
File "train_high_api_local.py", line 102, in after_create_session
saver = tf.train.Saver([ti]) # TODO: ERROR INFO: Graph is finalized and cannot be modified.
.......
File "/Users/zhouliaoming/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3135, in create_op
self._check_not_finalized()
File "/Users/zhouliaoming/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2788, in _check_not_finalized
raise RuntimeError("Graph is finalized and cannot be modified.")
RuntimeError: Graph is finalized and cannot be modified.
The relevant code is:
class SetTensor(session_run_hook.SessionRunHook):
    """ like tf.train.LoggingTensorHook """
    def after_create_session(self, session, coord):
        """ Called when new TensorFlow session is created: graph is finalized and ops can no longer be added. """
        graph = tf.get_default_graph()
        ti = graph.get_tensor_by_name("h_1_15/bias:0")
        with session.as_default():
            with tf.name_scope("rewrite"):
                saver = tf.train.Saver([ti])  # TODO: ERROR INFO: Graph is finalized and cannot be modified.
                saver.restore(session, "/Users/zhouliaoming/data/credit_dnn/model_retrain/rm_gene_v2_sall/model.ckpt-2102")
def main(unused_argv):
    """ train """
    norm_all_func = lambda x: tf.cond(x > 1, lambda: tf.log(x), lambda: tf.identity(x))
    feature_columns = [[tf.feature_column.numeric_column(COLUMNS[i], shape=fi, normalizer_fn=lambda x: tf.py_func(weight_norm2, [x], tf.float32))]
                       for i, fi in enumerate(FEA_DIM)]  # normalized: runs OK
    ## use self-defined model
    param = {"learning_rate": 0.0001, "feature_columns": feature_columns, "isanalysis": FLAGS.isanalysis, "isall": False}
    clf_ = tf.estimator.Estimator(model_fn=model_fn_wide2deep, params=param, model_dir=ckpt_dir)
    hook_test = SetTensor(["h_1_15/bias", "h_1_15/kernel"])
    epochs_per_eval = 1
    for n in range(int(FLAGS.num_epochs / epochs_per_eval)):
        # train num_epochs
        clf_.train(input_fn=lambda: read_file([tables[0]], epochs_per_eval), steps=None, hooks=[hook_test])  # input yields: x, y
SessionRunHook is not meant for this use case. As the error says, you cannot change the graph once sess.run() has been invoked.
You can assign variables using saver.restore() in your "normal code". You don't have to be inside any hooks.
Also, if you want to restore many variables and can match them to their names and shapes in a checkpoint, you might want to take a look at https://gist.github.com/iganichev/d2d8a0b1abc6b15d4a07de83171163d4. It shows some example code to restore a subset of variables.
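For example, here is a minimal sketch of that approach, restoring one sub-model's variables with a Saver in the normal graph-building code; the scope name "h_1_15" mirrors the layer in the question, and the checkpoint path is a placeholder:
import tensorflow as tf

# Build the final model's graph first (the same graph-building code used
# for training), then create a Saver restricted to the sub-model's variables.
sub_model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="h_1_15")
saver = tf.train.Saver(var_list=sub_model_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Overwrite the freshly initialized values with the pretrained ones.
    saver.restore(sess, "/path/to/sub_model/model.ckpt-2102")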
You can do this:
class SaveAtEnd(tf.train.SessionRunHook):
    def begin(self):
        self._saver = ...  # create your saver here
    def end(self, session):
        self._saver.save(session, ...)
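For completeness, a filled-in sketch of that hook; the checkpoint path is illustrative. The key point is that begin() is called before the graph is finalized, so the Saver's ops can still be added there:
class SaveAtEnd(tf.train.SessionRunHook):
    def begin(self):
        # begin() runs before the graph is finalized, so creating the
        # Saver (which adds save/restore ops) is still allowed here.
        self._saver = tf.train.Saver(tf.global_variables())

    def end(self, session):
        self._saver.save(session, "/tmp/final_model/model.ckpt")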
I have saved the trained model and the weights as below.
model, history, score = fit_model(model, train_batches, val_batches, callbacks=[callback])
model.save('./model')
model.save_weights('./weights')
Then I tried to load the saved model in the following way:
if __name__ == '__main__':
    model = keras.models.load_model('./model', compile=False, custom_objects={"F1Score": tfa.metrics.F1Score})
    test_batches, nb_samples = test_gen(dataset_test_path, 32, img_width, img_height)
    predict, loss, acc = predict_model(model, test_batches, nb_samples)
    print(predict)
    print(acc)
    print(loss)
But it gives me an error. What should I do to overcome this?
Traceback (most recent call last):
File "test_pro.py", line 34, in <module>
model = keras.models.load_model('./model',compile= False,custom_objects={"F1Score": tfa.metrics.F1Score})
File "/home/dcs2016csc007/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/save.py", line 212, in load_model
return saved_model_load.load(filepath, compile, options)
File "/home/dcs2016csc007/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/saved_model/load.py", line 138, in load
keras_loader.load_layers()
File "/home/dcs2016csc007/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/saved_model/load.py", line 379, in load_layers
self.loaded_nodes[node_metadata.node_id] = self._load_layer(
File "/home/dcs2016csc007/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/saved_model/load.py", line 407, in _load_layer
obj, setter = revive_custom_object(identifier, metadata)
File "/home/dcs2016csc007/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/saved_model/load.py", line 921, in revive_custom_object
raise ValueError('Unable to restore custom object of type {} currently. '
ValueError: Unable to restore custom object of type _tf_keras_metric currently. Please make sure that the layer implements `get_config`and `from_config` when saving. In addition, please use the `custom_objects` arg when calling `load_model()`.
Looking at the source code for Keras, the error is raised when trying to load a model with a custom object:
def revive_custom_object(identifier, metadata):
  """Revives object from SavedModel."""
  if ops.executing_eagerly_outside_functions():
    model_class = training_lib.Model
  else:
    model_class = training_lib_v1.Model

  revived_classes = {
      constants.INPUT_LAYER_IDENTIFIER: (
          RevivedInputLayer, input_layer.InputLayer),
      constants.LAYER_IDENTIFIER: (RevivedLayer, base_layer.Layer),
      constants.MODEL_IDENTIFIER: (RevivedNetwork, model_class),
      constants.NETWORK_IDENTIFIER: (RevivedNetwork, functional_lib.Functional),
      constants.SEQUENTIAL_IDENTIFIER: (RevivedNetwork, models_lib.Sequential),
  }
  parent_classes = revived_classes.get(identifier, None)

  if parent_classes is not None:
    parent_classes = revived_classes[identifier]
    revived_cls = type(
        compat.as_str(metadata['class_name']), parent_classes, {})
    return revived_cls._init_from_metadata(metadata)  # pylint: disable=protected-access
  else:
    raise ValueError('Unable to restore custom object of type {} currently. '
                     'Please make sure that the layer implements `get_config`'
                     'and `from_config` when saving. In addition, please use '
                     'the `custom_objects` arg when calling `load_model()`.'
                     .format(identifier))
The method only works with custom objects of the types defined in revived_classes. As you can see, it currently supports only input layer, layer, model, network, and sequential custom objects.
In your code, you pass a tfa.metrics.F1Score class in the custom_objects argument, which is of type METRIC_IDENTIFIER and is therefore not supported (probably because it doesn't implement the get_config and from_config functions, as the error output says):
keras.models.load_model('./model', compile=False, custom_objects={"F1Score": tfa.metrics.F1Score})
It's been a while since I last worked with Keras, but maybe you can try to follow what was proposed in this other related answer and wrap the call to tfa.metrics.F1Score in a function. Something like this (adjust it to your needs):
def f1(y_true, y_pred):
    metric = tfa.metrics.F1Score(num_classes=3, threshold=0.5)
    metric.update_state(y_true, y_pred)
    return metric.result()

keras.models.load_model('./model', compile=False, custom_objects={'f1': f1})
I have written TensorFlow code using the TPUEstimator, but I am having problems running it in use_tpu=False mode. I would like to run it on my local computer to make sure that all the operations are TPU-compatible. The code works fine with the normal Estimator. Here is my master code:
import logging
import os  # needed for os.environ below
import subprocess
from tensorflow.contrib.tpu.python.tpu import tpu_config, tpu_estimator, tpu_optimizer
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from capser_7_model_fn import *
from capser_7_input_fn import *
from absl import flags

flags.DEFINE_bool(
    'use_tpu', False,
    'Use TPUs rather than plain CPUs')
tf.flags.DEFINE_string(
    "tpu", default='$TPU_NAME',
    help="The Cloud TPU to use for training. This should be either the name "
         "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
         "url.")
tf.flags.DEFINE_string("model_dir", LOGDIR, "Estimator model_dir")
flags.DEFINE_integer(
    'save_checkpoints_secs', 1000,
    'Interval (in seconds) at which the model data '
    'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_integer(
    'save_summary_steps', 100,
    'Number of steps which must have run before showing summaries.')
tf.flags.DEFINE_integer("iterations", 1000,
                        "Number of iterations per TPU training loop.")
tf.flags.DEFINE_integer("num_shards", 8, "Number of shards (TPU chips).")
tf.flags.DEFINE_integer("batch_size", 1024,
                        "Mini-batch size for the training. Note that this "
                        "is the global batch size and not the per-shard batch.")
FLAGS = tf.flags.FLAGS

if FLAGS.use_tpu:
    my_project_name = subprocess.check_output(['gcloud', 'config', 'get-value', 'project'])
    my_zone = subprocess.check_output(['gcloud', 'config', 'get-value', 'compute/zone'])
    cluster_resolver = TPUClusterResolver(
        tpu=[FLAGS.tpu],
        zone=my_zone,
        project=my_project_name)
    master = TPUClusterResolver(tpu=[os.environ['TPU_NAME']]).get_master()
else:
    master = ''

my_tpu_run_config = tpu_config.RunConfig(
    master=master,
    model_dir=FLAGS.model_dir,
    save_checkpoints_secs=FLAGS.save_checkpoints_secs,
    save_summary_steps=FLAGS.save_summary_steps,
    session_config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True),
    tpu_config=tpu_config.TPUConfig(iterations_per_loop=FLAGS.iterations, num_shards=FLAGS.num_shards),
)

# create estimator for model (the model is described in capser_7_model_fn)
capser = tpu_estimator.TPUEstimator(model_fn=model_fn_tpu,
                                    config=my_tpu_run_config,
                                    use_tpu=FLAGS.use_tpu,
                                    train_batch_size=batch_size,
                                    params={'model_batch_size': batch_size_per_shard})

# train model
logging.getLogger().setLevel(logging.INFO)  # to show info about training progress
capser.train(input_fn=train_input_fn_tpu, steps=n_steps)
I have a capsule network defined in model_fn_tpu, which returns the TPUEstimator spec. The optimizer is a standard AdamOptimizer. I have made all the changes explained here https://www.tensorflow.org/guide/using_tpu#optimizer to make my code compatible with TPUEstimator. I get the following error:
Traceback (most recent call last):
File "C:/Users/doerig/PycharmProjects/capser/TPU_playground.py", line 85, in <module>
capser.train(input_fn=train_input_fn_tpu, steps=n_steps)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\python\estimator\estimator.py", line 363, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\python\estimator\estimator.py", line 843, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\python\estimator\estimator.py", line 856, in _train_model_default
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\python\estimator\estimator.py", line 831, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\contrib\tpu\python\tpu\tpu_estimator.py", line 2016, in _model_fn
features, labels, is_export_mode=is_export_mode)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\contrib\tpu\python\tpu\tpu_estimator.py", line 1121, in call_without_tpu
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\contrib\tpu\python\tpu\tpu_estimator.py", line 1317, in _call_model_fn
estimator_spec = self._model_fn(features=features, **kwargs)
File "C:\Users\doerig\PycharmProjects\capser\capser_7_model_fn.py", line 101, in model_fn_tpu
**output_decoder_deconv_params)
File "C:\Users\doerig\PycharmProjects\capser\capser_model.py", line 341, in capser_model
loss_training_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step(), name="training_op")
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\python\training\optimizer.py", line 424, in minimize
name=name)
File "C:\Users\doerig\AppData\Local\Continuum\Anaconda2\envs\tensorflow\lib\site-packages\tensorflow\contrib\tpu\python\tpu\tpu_optimizer.py", line 113, in apply_gradients
summed_grads_and_vars.append((tpu_ops.cross_replica_sum(grad), var))
AttributeError: module 'tensorflow.contrib.tpu.python.ops.tpu_ops' has no attribute 'cross_replica_sum'
Any ideas to solve this problem? Thank you in advance!
I suspect this is either a bug in the combination of the TensorFlow version you are using and Windows, or else an issue with your build of TensorFlow.
For example, when I chase down the file tensorflow\contrib\tpu\python\tpu\tpu_optimizer.py in the TF 1.4 branch, I see that tpu_ops is imported as:
from tensorflow.contrib.tpu.python.ops import tpu_ops
and if you chase that to the relevant file, you see:
if platform.system() != "Windows":
  # pylint: disable=wildcard-import,unused-import,g-import-not-at-top
  from tensorflow.contrib.tpu.ops.gen_tpu_ops import *
  from tensorflow.contrib.util import loader
  from tensorflow.python.platform import resource_loader
  # pylint: enable=wildcard-import,unused-import,g-import-not-at-top

  _tpu_ops = loader.load_op_library(
      resource_loader.get_path_to_datafile("_tpu_ops.so"))
else:
  # We have already built the appropriate libraries into the binary via CMake
  # if we have built contrib, so we don't need this
  pass
Following up with the other TF branches that existed at the time of this posting, we see similar comments in 1.5, in 1.6, in 1.7, in 1.8, and in 1.9.
I strongly suspect this would not occur under Linux, but I might test this later and edit this answer.
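As a quick diagnostic on the affected machine, you could check whether the generated op wrappers were loaded at all; this is just a sanity check, not a fix:
from tensorflow.contrib.tpu.python.ops import tpu_ops

# On builds where the op library is skipped (the Windows branch above),
# the wildcard import never runs and this prints False.
print(hasattr(tpu_ops, 'cross_replica_sum'))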
I have trained a model in TensorFlow and now I would like to visualize which inputs maximally activate an output. I'd like to know what the cleanest way to do this is.
I had thought to do this by creating a trainable input variable which I can assign once per run. Then, using an appropriate loss function and an optimizer with a var_list containing just this input variable, I would update the input variable until convergence, i.e.
trainable_input = tf.get_variable(
    'trainable_input',
    shape=data_op.get_shape(),
    dtype=data_op.dtype,
    initializer=tf.zeros_initializer(),
    trainable=True,
    collections=[tf.GraphKeys.LOCAL_VARIABLES])
trainable_input_assign_op = tf.assign(trainable_input, data_op)
data_op = trainable_input

# ... run the rest of the graph building code here, now with a trainable input

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# loss_op is defined on one of the outputs
train_op = optimizer.minimize(loss_op, var_list=[trainable_input])
However, when I do this I run into issues. If I try to restore the pre-trained graph using a Supervisor, it naturally complains that the new variables created by the AdamOptimizer do not exist in the graph I'm trying to restore. I can remedy this by using get_slots to get the variables the AdamOptimizer creates and manually adding them to the tf.GraphKeys.LOCAL_VARIABLES collection (see the sketch at the end of this question), but it feels pretty hacky and I'm not sure what the consequences would be. I can also exclude those variables explicitly from the Saver that is passed to the Supervisor, without adding them to the tf.GraphKeys.LOCAL_VARIABLES collection, but then I get an exception that they do not get properly initialized by the Supervisor:
File "/usr/local/lib/python3.5/site-packages/tensorflow/python/training/supervisor.py", line 973, in managed_session
self.stop(close_summary_writer=close_summary_writer)
File "/usr/local/lib/python3.5/site-packages/tensorflow/python/training/supervisor.py", line 801, in stop
stop_grace_period_secs=self._stop_grace_secs)
File "/usr/local/lib/python3.5/site-packages/tensorflow/python/training/coordinator.py", line 386, in join
six.reraise(*self._exc_info_to_raise)
File "/usr/local/lib/python3.5/site-packages/six.py", line 686, in reraise
raise value
File "/usr/local/lib/python3.5/site-packages/tensorflow/python/training/supervisor.py", line 962, in managed_session
start_standard_services=start_standard_services)
File "/usr/local/lib/python3.5/site-packages/tensorflow/python/training/supervisor.py", line 719, in prepare_or_wait_for_session
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
File "/usr/local/lib/python3.5/site-packages/tensorflow/python/training/session_manager.py", line 280, in prepare_session
self._local_init_op, msg))
RuntimeError: Init operations did not make model ready. Init op: init, init fn: None, local_init_op: name: "group_deps_5"
op: "NoOp"
input: "^init_1"
input: "^init_all_tables"
, error: Variables not initialized: trainable_input/trainable_input/Adam, trainable_input/trainable_input/Adam_1
I'm not really sure why these variables are not getting initialized since I have used that technique before to exclude some variables from the restore process (GLOBAL and LOCAL) and they seem to get initialized as expected.
In short, my question is whether there is a simple way to add an optimizer to the graph and do a checkpoint restore (where the checkpoint does not contain the optimizer variables) without having to muck around with the internals of the optimizer. If that's not possible, then is there any downside to just adding the optimizer variables to the LOCAL_VARIABLES collection?
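For reference, here is a minimal sketch of the get_slots workaround described above, written against the public get_slot_names()/get_slot() API and the optimizer and trainable_input from the snippet earlier in this question (whether this also covers Adam's beta-power accumulators I have not verified):
# Register the optimizer's slot variables (Adam's 'm' and 'v') as local
# variables so a Saver built from the global variables ignores them.
for slot_name in optimizer.get_slot_names():
    slot_var = optimizer.get_slot(trainable_input, slot_name)
    if slot_var is not None:
        tf.add_to_collection(tf.GraphKeys.LOCAL_VARIABLES, slot_var)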
The same error occurs when I use the slim library. In fact, slim.learning.train() uses tf.train.Supervisor internally. I hope my answer on this GitHub issue may help with your Supervisor problem.
I had the same problem, and I solved it with the following two steps.
1. pass the parameter saver to slim.learning.train()
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
saver = tf.train.Saver(var_list=optimistic_restore_vars(ckpt.model_checkpoint_path) if ckpt else None)
where function optimistic_restore_vars is defined as
def optimistic_restore_vars(model_checkpoint_path):
    reader = tf.train.NewCheckpointReader(model_checkpoint_path)
    saved_shapes = reader.get_variable_to_shape_map()
    var_names = sorted([(var.name, var.name.split(':')[0])
                        for var in tf.global_variables()
                        if var.name.split(':')[0] in saved_shapes])
    restore_vars = []
    name2var = dict(zip(map(lambda x: x.name.split(':')[0], tf.global_variables()),
                        tf.global_variables()))
    with tf.variable_scope('', reuse=True):
        for var_name, saved_var_name in var_names:
            curr_var = name2var[saved_var_name]
            var_shape = curr_var.get_shape().as_list()
            if var_shape == saved_shapes[saved_var_name]:
                restore_vars.append(curr_var)
    return restore_vars
2. pass the parameter local_init_op to slim.learning.train() to initialize the added new variables
local_init_op = tf.global_variables_initializer()
Finally, the code should look like this:
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
saver = tf.train.Saver(var_list=optimistic_restore_vars(ckpt.model_checkpoint_path) if ckpt else None)
local_init_op = tf.global_variables_initializer()

###########################
# Kicks off the training. #
###########################
learning.train(
    train_tensor,
    saver=saver,
    local_init_op=local_init_op,
    logdir=FLAGS.train_dir,
    master=FLAGS.master,
    is_chief=(FLAGS.task == 0),
    init_fn=_get_init_fn(),
    summary_op=summary_op,
    number_of_steps=FLAGS.max_number_of_steps,
    log_every_n_steps=FLAGS.log_every_n_steps,
    save_summaries_secs=FLAGS.save_summaries_secs,
    save_interval_secs=FLAGS.save_interval_secs,
    sync_optimizer=optimizer if FLAGS.sync_replicas else None)
Running the code below, tf.contrib.slim.get_variables_to_restore() returns an empty list [] for all_vars, which then causes a failure when calling tf.train.Saver. The detailed error message is shown below.
Am I missing anything?
>>> import tensorflow as tf
>>> inception_exclude_scopes = ['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step', 'final_ops']
>>> inception_checkpoint_file = '/Users/morgan.du/git/machine-learning/projects/capstone/yelp/model/inception_v3_2016_08_28.ckpt'
>>> with tf.Session(graph=tf.Graph()) as sess:
... init_op = tf.global_variables_initializer()
... sess.run(init_op)
... reader = tf.train.NewCheckpointReader(inception_checkpoint_file)
... var_to_shape_map = reader.get_variable_to_shape_map()
... all_vars = tf.contrib.slim.get_variables_to_restore(exclude=inception_exclude_scopes)
... inception_saver = tf.train.Saver(all_vars)
... inception_saver.restore(sess, inception_checkpoint_file)
...
Traceback (most recent call last):
File "<stdin>", line 7, in <module>
File "/Users/morgan.du/miniconda2/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 1051, in __init__
self.build()
File "/Users/morgan.du/miniconda2/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 1072, in build
raise ValueError("No variables to save")
ValueError: No variables to save
The problem here seems to be that your graph is empty, i.e. it does not contain any variables. You create a new graph on the line with tf.Session(graph=tf.Graph()), and none of the following lines creates a tf.Variable object.
To restore a pre-trained TensorFlow model, you need to do one of three things:
Rebuild the model graph, by executing the same Python graph building code that was used to train the model in the first place.
Load a "MetaGraph" that contains information about how to reconstruct the graph structure and model variables. See this tutorial for more details on how to create and use a MetaGraph. MetaGraphs are often created alongside checkpoint files, and typically have the extension .meta.
Load a "SavedModel", which contains a "MetaGraph". See the documentation here for more details.
I was trying to get a variable I created in a simple function but I keep getting errors. I am doing:
x = tf.get_variable('quadratic/x')
but Python complains as follows:
python qm_tb_scopes.py
quadratic/x:0
Traceback (most recent call last):
File "qm_tb_scopes.py", line 24, in <module>
x = tf.get_variable('quadratic/x')
File "/Users/my_username/path/tensor_flow_experiments/venv/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 732, in get_variable
partitioner=partitioner, validate_shape=validate_shape)
File "/Users/my_username/path/tensor_flow_experiments/venv/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 596, in get_variable
partitioner=partitioner, validate_shape=validate_shape)
File "/Users/my_username/path/tensor_flow_experiments/venv/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 161, in get_variable
caching_device=caching_device, validate_shape=validate_shape)
File "/Users/my_username/path/tensor_flow_experiments/venv/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 457, in _get_single_variable
"but instead was %s." % (name, shape))
ValueError: Shape of a new variable (quadratic/x) must be fully defined, but instead was <unknown>.
It seems it's trying to create a new variable, but I am simply trying to get an existing one. Why is it doing this?
The whole code is:
import tensorflow as tf

def get_quaratic():
    # x variable
    with tf.variable_scope('quadratic'):
        x = tf.Variable(10.0, name='x')
        # b placeholder (simulates the "data" part of the training)
        b = tf.placeholder(tf.float32, name='b')
        # make model (1/2)(x-b)^2
        xx_b = 0.5*tf.pow(x-b, 2)
        y = xx_b
    return y, x

y, x = get_quaratic()
learning_rate = 1.0
# get optimizer
opt = tf.train.GradientDescentOptimizer(learning_rate)
# gradient variable list = [ (gradient, variable) ]
print x.name
x = tf.get_variable('quadratic/x')
x = tf.get_variable(x.name)
You need to pass the option reuse=True to tf.variable_scope() if you want to get the same variable twice. See the documentation (https://www.tensorflow.org/versions/r0.9/how_tos/variable_scope/index.html) for more details.
Alternatively, you could get the variable once, outside your Python function, and pass it in as an argument. I find that a bit cleaner, since it makes explicit which variables the code uses.
I hope that helps!
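For illustration, a minimal sketch of that reuse idiom; note that tf.get_variable() with reuse=True only finds variables that were originally created via tf.get_variable(), not via tf.Variable() as in the question's code:
import tensorflow as tf

with tf.variable_scope('quadratic'):
    x = tf.get_variable('x', shape=(), initializer=tf.constant_initializer(10.0))

# Re-enter the same scope with reuse=True to fetch the existing variable.
with tf.variable_scope('quadratic', reuse=True):
    x_again = tf.get_variable('x', shape=())

assert x is x_again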
This is not the best solution, but try creating the variable through tf.get_variable() with reuse=False to ensure a new variable is created. Then, when obtaining the variable, use tf.get_variable() with reuse=True to get the current variable. Setting reuse to tf.AUTO_REUSE risks the creation of a new variable if the exact var is not present. Also make sure to specify the shape of the variable in tf.get_variable().
import tensorflow as tf

def get_quaratic():
    # x variable
    with tf.variable_scope('quadratic', reuse=False):
        x = tf.get_variable('x', ())
        tf.assign(x, 10)  # note: this returns an assign op, which only takes effect when run in a session
        # b placeholder (simulates the "data" part of the training)
        b = tf.placeholder(tf.float32, name='b')
        # make model (1/2)(x-b)^2
        xx_b = 0.5*tf.pow(x-b, 2)
        y = xx_b
    return y, x

y, x = get_quaratic()
learning_rate = 1.0
# get optimizer
opt = tf.train.GradientDescentOptimizer(learning_rate)
# gradient variable list = [ (gradient, variable) ]
print(x.name)

with tf.variable_scope('', reuse=True):
    x = tf.get_variable('quadratic/x', shape=())
    print(tf.global_variables())  # there is only 1 variable