ValueError: ('Could not interpret initializer identifier:', 0.2) - tensorflow

Traceback (most recent call last):
  File "AutoFC_AlexNet_randomsearch_CalTech101_v2.py", line 112, in
    X = layers.Dense(neurons, activation=activation, kernel_initializer=weight_init)(X)
  File "/home/shabbeer/NAS/lib/python3.5/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/shabbeer/NAS/lib/python3.5/site-packages/keras/layers/core.py", line 824, in __init__
    self.kernel_initializer = initializers.get(kernel_initializer)
  File "/home/shabbeer/NAS/lib/python3.5/site-packages/keras/initializers.py", line 503, in get
    identifier)
ValueError: ('Could not interpret initializer identifier:', 0.2)
I am getting the above error when running the code with tensorflow-gpu 1.4.0 and Keras 2.1.3.

You are passing a plain float (0.2) as the kernel_initializer, which Keras cannot interpret as an initializer. Wrap it in a constant initializer instead:
X = layers.Dense(neurons, activation=activation, kernel_initializer=keras.initializers.Constant(weight_init))(X)
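For context, a minimal self-contained sketch of the fix (the layer size, activation, and the 0.2 value are placeholders standing in for the original script's variables):

import keras
from keras import layers

neurons, activation, weight_init = 128, "relu", 0.2

inputs = layers.Input(shape=(64,))
# Passing the raw float 0.2 raises the ValueError above; wrapping it in an
# initializer object tells Keras to fill the kernel with that constant value.
X = layers.Dense(neurons, activation=activation,
                 kernel_initializer=keras.initializers.Constant(weight_init))(inputs)
model = keras.models.Model(inputs, X)
model.summary()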

Related

How can I integrate Optuna with Deepspeech training?

I'm trying to integrate Optuna with DeepSpeech in order to optimise some of its hyperparameters. I'm sticking to learning rate for now, just to get a feel for how Optuna works, but I've hit a roadblock and need some help.
I have a function hps_train which is what does the training step. It takes the Optuna trial object as the argument and returns the dev loss, which is what I want to use Optuna to minimise. This is the exact same function as train() in training/deepspeech_training/train.py, but with a few modifications:
def hps_train(trial):
    # ...
    # Same as train() in https://github.com/mozilla/DeepSpeech/blob/master/training/deepspeech_training/train.py
    # ...
    if FLAGS.horovod:
        # Effective batch size in synchronous distributed training is scaled by the number of workers.
        # An increase in learning rate compensates for the increased batch size.
        optimizer = hps_create_optimizer(learning_rate_var * hvd.size())
        optimizer = hvd.DistributedOptimizer(optimizer)
    else:
        optimizer, learning_rate_var = hps_create_optimizer(trial)
    reduce_learning_rate_op = learning_rate_var.assign(
        tf.multiply(learning_rate_var, FLAGS.plateau_reduction)
    )
    # ...
    # Same as train() in https://github.com/mozilla/DeepSpeech/blob/master/training/deepspeech_training/train.py
    # ...
    with tfv1.Session(config=Config.session_config) as session:
        # ...
        # Same as train() in https://github.com/mozilla/DeepSpeech/blob/master/training/deepspeech_training/train.py
        # ...
        final_dev_loss = dev_losses[-1]
    log_debug("Session closed.")
    return final_dev_loss
I also have some helper functions:
def hps_create_optimizer(trial):
    learning_rate = trial.suggest_float("adam_lr", 1e-5, 1e-1, log=True)
    with tf.variable_scope("learning_rate", reuse=tf.AUTO_REUSE):
        learning_rate_var = tfv1.get_variable(
            "learning_rate", initializer=learning_rate, trainable=False
        )
    optimizer = tfv1.train.AdamOptimizer(
        learning_rate=learning_rate_var, beta1=0.9, beta2=0.999, epsilon=1e-08
    )
    return optimizer, learning_rate_var

def new_trial_callback(study, trial):
    chkpt_path = setup_dirs(study.study_name, trial.number + 1)
    FLAGS.checkpoint_dir = chkpt_path
    FLAGS.save_checkpoint_dir = chkpt_path
    FLAGS.load_checkpoint_dir = chkpt_path

def objective(trial, session):
    if FLAGS.train_files:
        val_loss = hps_train(trial, session)
    return float(val_loss)

def objective_tf(trial):
    tfv1.reset_default_graph()
    with tfv1.Graph().as_default():
        return objective(trial, session)
Putting it all together:
def main(_):
    initialize_globals()
    early_training_checks()
    lr_study = optuna.create_study(study_name="lr_study", direction='minimize')
    chkpt_dir = setup_dirs(lr_study.study_name, 0)
    FLAGS.checkpoint_dir = chkpt_dir
    FLAGS.save_checkpoint_dir = chkpt_dir
    FLAGS.load_checkpoint_dir = chkpt_dir
    lr_study.optimize(objective_tf, n_trials=25, callbacks=[new_trial_callback])
When I run this code, the first trial completes normally. However, when it tries to start the second one, I get an error:
$ python training/hparam_search.py --train_files ~/datasets/cv-corpus-1/en/clips/train.csv --dev_files ~/datasets/cv-corpus-1/en/clips/dev.csv --test_files ~/datasets/cv-corpus-1/en/clips/test.csv --train_batch_size 64 --test_batch_size 64 --dev_batch_size 64 --n_hidden 512 --epochs 1 --train_cudnn --use_allow_growth --checkpoint_dir checkpoints
[I 2021-08-30 15:06:16,637] A new study created in memory with name: lr_study
I Could not find best validating checkpoint.
I Could not find most recent checkpoint.
I Initializing all variables.
I STARTING Optimization
Epoch 0 | Training | Elapsed Time: 0:00:17 | Steps: 187 | Loss: 252.374135
Epoch 0 | Validation | Elapsed Time: 0:00:12 | Steps: 109 | Loss: 255.176724 | Dataset: /home/user/datasets/cv-corpus-1/en/clips/dev.csv
I Saved new best validating model with loss 255.176724 to: checkpoints/optuna_trials/lr_study/0/best_dev-187
--------------------------------------------------------------------------------
I FINISHED optimization in 0:00:30.553797
[I 2021-08-30 15:06:50,101] Trial 0 finished with value: 255.1767243551552 and parameters: {'adam_lr': 0.006636434104761772}. Best is trial 0 with value: 255.1767243551552.
[W 2021-08-30 15:06:50,229] Trial 1 failed because of the following error: ValueError('in converted code:\n relative to /usr/local/lib/python3.6/dist-packages/tensorflow_core:\n\n contrib/cudnn_rnn/python/layers/cudnn_rnn.py:440 call\n training)\n contrib/cudnn_rnn/python/layers/cudnn_rnn.py:518 _forward\n seed=self._seed)\n contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py:1132 _cudnn_rnn\n outputs, output_h, output_c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(**args)\n python/ops/gen_cudnn_rnn_ops.py:2051 cudnn_rnnv3\n time_major=time_major, name=name)\n python/framework/op_def_library.py:367 _apply_op_helper\n g = ops._get_graph_from_inputs(_Flatten(keywords.values()))\n python/framework/ops.py:5979 _get_graph_from_inputs\n _assert_same_graph(original_graph_element, graph_element)\n python/framework/ops.py:5914 _assert_same_graph\n (item, original_item))\n\n ValueError: Tensor("cudnn_lstm/opaque_kernel:0", dtype=float32_ref, device=/device:GPU:0) must be from the same graph as Tensor("tower_0/Reshape_2:0", shape=(?, ?, 512), dtype=float32, device=/device:GPU:0).\n',)
Traceback (most recent call last):
File "/home/user/.local/lib/python3.6/site-packages/optuna/study/_optimize.py", line 213, in _run_trial
value_or_values = func(trial)
File "training/hparam_search.py", line 671, in objective_tf
return objective(trial)
File "training/hparam_search.py", line 660, in objective
val_loss = hps_train(trial)
File "training/hparam_search.py", line 332, in hps_train
iterator, optimizer, dropout_rates
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 317, in get_tower_results
avg_loss, non_finite_files = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0)
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 244, in calculate_mean_edit_distance_and_loss
logits, _ = create_model(batch_x, batch_seq_len, dropout, reuse=reuse, rnn_impl=rnn_impl)
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 195, in create_model
output, output_state = rnn_impl(layer_3, seq_length, previous_state, reuse)
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 133, in rnn_impl_cudnn_rnn
sequence_lengths=seq_length)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/layers/base.py", line 548, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 854, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/autograph/impl/api.py", line 237, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in converted code:
relative to /usr/local/lib/python3.6/dist-packages/tensorflow_core:
contrib/cudnn_rnn/python/layers/cudnn_rnn.py:440 call
training)
contrib/cudnn_rnn/python/layers/cudnn_rnn.py:518 _forward
seed=self._seed)
contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py:1132 _cudnn_rnn
outputs, output_h, output_c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(**args)
python/ops/gen_cudnn_rnn_ops.py:2051 cudnn_rnnv3
time_major=time_major, name=name)
python/framework/op_def_library.py:367 _apply_op_helper
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
python/framework/ops.py:5979 _get_graph_from_inputs
_assert_same_graph(original_graph_element, graph_element)
python/framework/ops.py:5914 _assert_same_graph
(item, original_item))
ValueError: Tensor("cudnn_lstm/opaque_kernel:0", dtype=float32_ref, device=/device:GPU:0) must be from the same graph as Tensor("tower_0/Reshape_2:0", shape=(?, ?, 512), dtype=float32, device=/device:GPU:0).
Traceback (most recent call last):
File "training/hparam_search.py", line 691, in <module>
absl.app.run(main)
File "/usr/local/lib/python3.6/dist-packages/absl/app.py", line 303, in run
_run_main(main, args)
File "/usr/local/lib/python3.6/dist-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "training/hparam_search.py", line 684, in main
lr_study.optimize(objective_tf, n_trials=25, callbacks=[new_trial_callback])
File "/home/user/.local/lib/python3.6/site-packages/optuna/study/study.py", line 409, in optimize
show_progress_bar=show_progress_bar,
File "/home/user/.local/lib/python3.6/site-packages/optuna/study/_optimize.py", line 76, in _optimize
progress_bar=progress_bar,
File "/home/user/.local/lib/python3.6/site-packages/optuna/study/_optimize.py", line 163, in _optimize_sequential
trial = _run_trial(study, func, catch)
File "/home/user/.local/lib/python3.6/site-packages/optuna/study/_optimize.py", line 264, in _run_trial
raise func_err
File "/home/user/.local/lib/python3.6/site-packages/optuna/study/_optimize.py", line 213, in _run_trial
value_or_values = func(trial)
File "training/hparam_search.py", line 671, in objective_tf
return objective(trial)
File "training/hparam_search.py", line 660, in objective
val_loss = hps_train(trial)
File "training/hparam_search.py", line 332, in hps_train
iterator, optimizer, dropout_rates
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 317, in get_tower_results
avg_loss, non_finite_files = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0)
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 244, in calculate_mean_edit_distance_and_loss
logits, _ = create_model(batch_x, batch_seq_len, dropout, reuse=reuse, rnn_impl=rnn_impl)
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 195, in create_model
output, output_state = rnn_impl(layer_3, seq_length, previous_state, reuse)
File "/home/user/DeepSpeech/training/deepspeech_training/train.py", line 133, in rnn_impl_cudnn_rnn
sequence_lengths=seq_length)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/layers/base.py", line 548, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 854, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/autograph/impl/api.py", line 237, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in converted code:
relative to /usr/local/lib/python3.6/dist-packages/tensorflow_core:
contrib/cudnn_rnn/python/layers/cudnn_rnn.py:440 call
training)
contrib/cudnn_rnn/python/layers/cudnn_rnn.py:518 _forward
seed=self._seed)
contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py:1132 _cudnn_rnn
outputs, output_h, output_c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv3(**args)
python/ops/gen_cudnn_rnn_ops.py:2051 cudnn_rnnv3
time_major=time_major, name=name)
python/framework/op_def_library.py:367 _apply_op_helper
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
python/framework/ops.py:5979 _get_graph_from_inputs
_assert_same_graph(original_graph_element, graph_element)
python/framework/ops.py:5914 _assert_same_graph
(item, original_item))
ValueError: Tensor("cudnn_lstm/opaque_kernel:0", dtype=float32_ref, device=/device:GPU:0) must be from the same graph as Tensor("tower_0/Reshape_2:0", shape=(?, ?, 512), dtype=float32, device=/device:GPU:0).
It looks like the ValueError is complaining that some tensor is not from the same graph as another. But I don't understand how this can be, since I start each run within a new Graph context, so every tensor should be associated with this new graph.
Optuna version is 2.9.1 and TensorFlow version is 1.15.4.
I'd be grateful for any insights into where I'm going wrong here, or even if this is the recommended way to use Optuna. Thanks very much!
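For reference, the rule that the error enforces can be reproduced in isolation; a minimal illustrative sketch (unrelated to the DeepSpeech internals): a tensor created while one tf.Graph is the default cannot be used as an input to an op built under a different graph.

import tensorflow as tf  # TF 1.x, graph mode

g1 = tf.Graph()
with g1.as_default():
    a = tf.constant(1.0)   # lives in g1

g2 = tf.Graph()
with g2.as_default():
    b = tf.constant(2.0)   # lives in g2
    try:
        c = a + b           # mixes tensors from g1 and g2
    except ValueError as err:
        print(err)          # "... must be from the same graph as ..."

So if any layer object or cached tensor built during the first trial is reused while the second trial's graph is the default, this error can appear even though a fresh graph context was entered.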

tensorlayer can't load pretrained weights properly

I have a problem loading pretrained params in tensorlayer.
I am trying to use srgan (https://github.com/tensorlayer/srgan) with its pretrained params (https://github.com/tensorlayer/srgan/releases/tag/1.2.0).
The applicable code is below:
G = get_G([1, None, None, 3])
load_params = tl.files.load_npz(path='', name='g_srgan.npz')
tl.files.assign_weights(load_params, G)
# G.load_weights(os.path.join("g_srgan.npz"))
G.eval()
I got this error:
Traceback (most recent call last):
File "train.py", line 194, in <module>
evaluate(session)
File "train.py", line 156, in evaluate
tl.files.assign_weights(load_params, G)
File "/usr/local/lib/python3.6/dist-packages/tensorlayer/files/utils.py", line 2023, in assign_weights
ops.append(network.all_weights[idx].assign(param))
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py", line 819, in assign
self._shape.assert_is_compatible_with(value_tensor.shape)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/tensor_shape.py", line 1110, in assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (1, 1, 1, 64) and (64,) are incompatible
Please tell me the solution.
Thanks !!
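One way to narrow down where the shapes diverge is to print the saved arrays next to the model's weights before assigning them; a small diagnostic sketch, assuming the same get_G and g_srgan.npz as above:

import tensorlayer as tl

G = get_G([1, None, None, 3])
load_params = tl.files.load_npz(path='', name='g_srgan.npz')
# Compare each saved array's shape with the corresponding model weight to see
# at which index the ordering or architecture stops matching.
for idx, (saved, var) in enumerate(zip(load_params, G.all_weights)):
    print(idx, saved.shape, var.shape, var.name)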

tf.clip_by_norm(grad, 1.0) throws InvalidArgumentError Shapes must be equal rank, but are 2 and 1

Can someone explain why TensorFlow is giving me trouble when running the following code?
import tensorflow as tf

x = tf.keras.layers.Input(shape=(1,))
y = tf.keras.layers.Dense(1, activation=tf.nn.relu)(x)
loss = tf.losses.mean_squared_error(x, y)
grad = tf.gradients(loss, tf.trainable_variables())

# !!! GIVES ME TROUBLE !!!
clipped_grad = tf.clip_by_norm(grad, 1.0)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(y, feed_dict={x: [[1.0], [2.0], [3.0]]})
The error I get:
Traceback (most recent call last):
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\framework\ops.py", line 1589, in _create_c_op
c_op = c_api.TF_FinishOperation(op_desc)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Shapes must be equal rank, but are 2 and 1
From merging shape 0 with other shapes. for 'clip_by_norm/t' (op: 'Pack') with input shapes: [1,1], [1].
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/TObs/.PyCharmCE2018.1/config/scratches/scratch.py", line 11, in <module>
clipped_grad = tf.clip_by_norm(grad, 1.0)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\ops\clip_ops.py", line 140, in clip_by_norm
t = ops.convert_to_tensor(t, name="t")
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\framework\ops.py", line 1011, in convert_to_tensor
as_ref=False)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\framework\ops.py", line 1107, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\ops\array_ops.py", line 960, in _autopacking_conversion_function
return _autopacking_helper(v, inferred_dtype, name or "packed")
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\ops\array_ops.py", line 923, in _autopacking_helper
return gen_array_ops.pack(elems_as_tensors, name=scope)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 5532, in pack
"Pack", values=values, axis=axis, name=name)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\framework\ops.py", line 3414, in create_op
op_def=op_def)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\framework\ops.py", line 1756, in __init__
control_input_ops)
File "D:\Program Files\Python\Python_3_6_2\lib\site-packages\tensorflow\python\framework\ops.py", line 1592, in _create_c_op
raise ValueError(str(e))
ValueError: Shapes must be equal rank, but are 2 and 1
From merging shape 0 with other shapes. for 'clip_by_norm/t' (op: 'Pack') with input shapes: [1,1], [1].
Any thoughts? I'm running on a Windows 10 machine with tensorflow-gpu 1.9.0 and an NVIDIA GTX 1080.
Help would be much appreciated :)
Cheers,
Tobs.
After tinkering around, I found out that you have to apply tf.clip_by_norm separately to each tensor in the gradients list, like so:
clipped_gradients = [tf.clip_by_norm(g, grad_norm_clip) for g in tf.gradients(loss, tf.trainable_variables())]
This matches the error message: tf.gradients returns a Python list of tensors with different shapes ([1,1] for the kernel, [1] for the bias), and passing the whole list to tf.clip_by_norm makes TensorFlow try to pack it into a single tensor, which fails because the shapes differ.
I guess that's the right way to do it, right?
Cheers,
Tobs.
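As an aside, if the intent is to bound the combined norm of all gradients rather than each gradient's norm separately, tf.clip_by_global_norm accepts the whole list directly; a minimal sketch assuming the same loss and variables as above:

grads = tf.gradients(loss, tf.trainable_variables())
# Rescales all gradients together so that their joint L2 norm does not exceed 1.0,
# and also returns that global norm.
clipped_grads, global_norm = tf.clip_by_global_norm(grads, 1.0)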

Does TensorFlow Slim assume TF version 1.4 for NASNet?

I am trying to train NASNet-A_Mobile_224 for two-class classification using train_image_classifier.py from slim with nasnet_mobile. However, I get the error:
TypeError: separable_convolution2d() got an unexpected keyword argument 'data_format'
I suspect that the new NASNet requires TF version 1.4. Can somebody confirm this? I'm using TensorFlow 1.3.
The full error is given below:
Traceback (most recent call last):
File "train_image_classifier.py", line 574, in <module>
tf.app.run()
File "/home/sami/virenv/tensorflow_vanilla/local/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "train_image_classifier.py", line 474, in main
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
File "/home/sami/projects/Tools/models/research/slim/deployment/model_deploy.py", line 193, in create_clones
outputs = model_fn(*args, **kwargs)
File "train_image_classifier.py", line 457, in clone_fn
logits, end_points = network_fn(images)
File "/home/sami/projects/Tools/models/research/slim/nets/nets_factory.py", line 135, in network_fn
return func(images, num_classes, is_training=is_training, **kwargs)
File "/home/sami/projects/Tools/models/research/slim/nets/nasnet/nasnet.py", line 371, in build_nasnet_mobile
final_endpoint=final_endpoint)
File "/home/sami/projects/Tools/models/research/slim/nets/nasnet/nasnet.py", line 450, in _build_nasnet_base
net, cell_outputs = stem()
File "/home/sami/projects/Tools/models/research/slim/nets/nasnet/nasnet.py", line 445, in <lambda>
stem = lambda: _imagenet_stem(images, hparams, stem_cell)
File "/home/sami/projects/Tools/models/research/slim/nets/nasnet/nasnet.py", line 264, in _imagenet_stem
cell_num=cell_num)
File "/home/sami/projects/Tools/models/research/slim/nets/nasnet/nasnet_utils.py", line 326, in __call__
stride, original_input_left)
File "/home/sami/projects/Tools/models/research/slim/nets/nasnet/nasnet_utils.py", line 352, in _apply_conv_operation
net = _stacked_separable_conv(net, stride, operation, filter_size)
File "/home/sami/projects/Tools/models/research/slim/nets/nasnet/nasnet_utils.py", line 183, in _stacked_separable_conv
stride=stride)
File "/home/sami/virenv/tensorflow_vanilla/local/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 181, in func_with_args
return func(*args, **current_args)
TypeError: separable_convolution2d() got an unexpected keyword argument 'data_format'
Yes, it must be TensorFlow 1.4.0: the data_format keyword argument that nasnet_utils passes to slim's separable_convolution2d is not accepted in TensorFlow 1.3.

tensorflow slim inception_v3 model error

I am trying to use the TensorFlow inception_v3 model for a transfer learning project. I get the following error when building the model:
TypeError: Expected int32, got list containing Tensors of type '_Message' instead.
The same error does not arise when using the same script with the inception_v1 model.
The models are imported from slim.nets.
Running on CPU.
TensorFlow version: 0.12.1
Script
import tensorflow as tf
slim = tf.contrib.slim
import models.inception_v3 as inception_v3

print("initializing model")
inputs = tf.placeholder(tf.float32, shape=[32, 299, 299, 3])
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
    logits, endpoints = inception_v3.inception_v3(inputs, num_classes=1001, is_training=False)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for tvars in trainable_vars:
    print tvars.name
Full Error Message
Traceback (most recent call last):
File "test.py", line 8, in <module>
logits,endpoints = inception_v3.inception_v3(inputs, num_classes=1001, is_training=False)
File "/home/ashish/projects/python/fashion-language/models/inception_v3.py", line 576, in inception_v3
depth_multiplier=depth_multiplier)
File "/home/ashish/projects/python/fashion-language/models/inception_v3.py", line 181, in inception_v3_base
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 1075, in concat
dtype=dtypes.int32).get_shape(
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 669, in convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.py", line 176, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.py", line 165, in constant
tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape, verify_shape=verify_shape))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_util.py", line 367, in make_tensor_proto
_AssertCompatible(values, dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_util.py", line 302, in _AssertCompatible
(dtype.name, repr(mismatch), type(mismatch).__name__))
TypeError: Expected int32, got list containing Tensors of type '_Message' instead.
Found my mistake: I was importing the model from https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim/python/slim/nets, whereas the updated models are at https://github.com/tensorflow/models/tree/master/slim/nets.
I still haven't understood why there are two different repositories for the same classes. There must be a valid reason.
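For context, this TypeError is the classic symptom of a tf.concat argument-order mismatch: TensorFlow 0.12 expects tf.concat(concat_dim, values), while the model file in the traceback calls the TF 1.x form tf.concat(values, axis), so the list of branch tensors ends up where an int32 axis is expected. A minimal illustration with placeholder tensors (only the form matching the installed version will run):

import tensorflow as tf

a = tf.zeros([32, 8, 8, 64])
b = tf.zeros([32, 8, 8, 96])

# TensorFlow <= 0.12: axis first, then the list of tensors.
merged_old = tf.concat(3, [a, b])

# TensorFlow >= 1.0 (the form used by the slim model in the traceback):
# merged_new = tf.concat([a, b], 3)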