Saving A Model That Contains Universal Sentence Encoder as its Embedding - tensorflow

I am trying to save a model that uses USE from tf-hub as its embedding layer, with a few feed-forward layers stacked on top. The model itself seems to work fine, but I am having trouble saving and loading it.
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.layers import Input, Lambda, Dense, Dot
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K

# A TF1-format USE handle (e.g. version 2), which hub.Module can load
module_url = "https://tfhub.dev/google/universal-sentence-encoder/2"

tf.compat.v1.disable_eager_execution()
embed = hub.Module(module_url)

def UniversalEmbedding(x):
    return embed(tf.squeeze(tf.cast(x, tf.string)))

input_text = Input(shape=[], dtype=tf.string)
response_text = Input(shape=[], dtype=tf.string)
text_embedding = Lambda(UniversalEmbedding, output_shape=(512,))(input_text)
response_embedding = Lambda(UniversalEmbedding, output_shape=(512,))(response_text)
response_embedding = Dense(512, activation='relu')(response_embedding)
response_embedding = Dense(512, activation='relu')(response_embedding)
score = Dot(axes=1, normalize=True)([text_embedding, response_embedding])
pred = Dense(2, activation='softmax')(score)
text_encoder = Model(inputs=[input_text], outputs=text_embedding)
response_encoder = Model(inputs=[response_text], outputs=response_embedding)
model = Model(inputs=[input_text, response_text], outputs=pred)
The code above is how I built my model (it's a dual encoder model with USE as its encoder).
I had to disable eager execution because USE does not seem to work in an eager execution environment yet. If there is a workaround for that, I'd really appreciate any help with it too :)
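One direction I have seen but not yet verified: newer tensorflow_hub releases expose hub.KerasLayer, which is supposed to work eagerly under TF2. A minimal sketch of that approach (it assumes TF2 and tensorflow_hub >= 0.7, i.e. a different setup from the code above):

# Sketch only -- assumes TF2 and tensorflow_hub >= 0.7, unlike the code above.
import tensorflow as tf
import tensorflow_hub as hub

# USE v4 is published as a TF2 SavedModel, so it loads eagerly.
use_layer = hub.KerasLayer("https://tfhub.dev/google/universal-sentence-encoder/4",
                           input_shape=[], dtype=tf.string, trainable=False)

input_text = tf.keras.Input(shape=[], dtype=tf.string)
text_embedding = use_layer(input_text)  # shape (None, 512), no Lambda needed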
The model is trained and saved with the following code:
with tf.compat.v1.Session() as session:
    K.set_session(session)
    session.run(tf.compat.v1.global_variables_initializer())
    session.run(tf.compat.v1.tables_initializer())
    history = model.fit_generator(generator=train_neg_sample_generator,
                                  validation_data=val_neg_sample_generator,
                                  epochs=20,
                                  callbacks=[checkpointer, earlystopper],
                                  verbose=0)
The model loads with no error when the checkpoint weights (saved as HDF5 files) are loaded into the model defined in the code above. So the code below works fine, but only because the architecture 'model' is already defined above.
with tf.compat.v1.Session() as session:
    K.set_session(session)
    session.run(tf.compat.v1.global_variables_initializer())
    session.run(tf.compat.v1.tables_initializer())
    model.load_weights('./saved_models/weights.03-0.29.hdf5')
    tf.keras.models.save_model(model, 'test_model2.hdf5')
    predicts = model.predict([["how are you?", "how are you?", 'hi', 'my two favorites in one pic!'],
                              ["i'm fine", "what the heck", 'hi', 'same!']])
    print(predicts)
    print(np.argmax(predicts, axis=1))
Then I tried two things. First, I saved the architecture in JSON format, loaded the architecture back, and then loaded the weights, but it did not work. Then I tried to save the whole model via keras.models.save_model, but that did not work either.
In both cases, they returned
AttributeError: module 'tensorflow' has no attribute 'placeholder'
How can I save/load the whole model (if not at once, loading the architecture and weights separately is fine too)?
Here is the whole error log:
AttributeError Traceback (most recent call last)
<ipython-input-31-47468f2533ad> in <module>()
1 from keras.models import load_model
2
----> 3 model2 = load_model('testest.h5')
/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py in load_wrapper(*args, **kwargs)
456 os.remove(tmp_filepath)
457 return res
--> 458 return load_function(*args, **kwargs)
459
460 return load_wrapper
/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py in load_model(filepath, custom_objects, compile)
548 if H5Dict.is_supported_type(filepath):
549 with H5Dict(filepath, mode='r') as h5dict:
--> 550 model = _deserialize_model(h5dict, custom_objects, compile)
551 elif hasattr(filepath, 'write') and callable(filepath.write):
552 def load_function(h5file):
/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py in _deserialize_model(h5dict, custom_objects, compile)
241 raise ValueError('No model found in config.')
242 model_config = json.loads(model_config.decode('utf-8'))
--> 243 model = model_from_config(model_config, custom_objects=custom_objects)
244 model_weights_group = h5dict['model_weights']
245
/usr/local/lib/python3.6/dist-packages/keras/engine/saving.py in model_from_config(config, custom_objects)
591 '`Sequential.from_config(config)`?')
592 from ..layers import deserialize
--> 593 return deserialize(config, custom_objects=custom_objects)
594
595
/usr/local/lib/python3.6/dist-packages/keras/layers/__init__.py in deserialize(config, custom_objects)
166 module_objects=globs,
167 custom_objects=custom_objects,
--> 168 printable_module_name='layer')
/usr/local/lib/python3.6/dist-packages/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
145 config['config'],
146 custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
--> 147 list(custom_objects.items())))
148 with CustomObjectScope(custom_objects):
149 return cls.from_config(config['config'])
/usr/local/lib/python3.6/dist-packages/keras/engine/network.py in from_config(cls, config, custom_objects)
1041 # First, we create all layers and enqueue nodes to be processed
1042 for layer_data in config['layers']:
-> 1043 process_layer(layer_data)
1044
1045 # Then we process nodes in order of layer depth.
/usr/local/lib/python3.6/dist-packages/keras/engine/network.py in process_layer(layer_data)
1027
1028 layer = deserialize_layer(layer_data,
-> 1029 custom_objects=custom_objects)
1030 created_layers[layer_name] = layer
1031
/usr/local/lib/python3.6/dist-packages/keras/layers/__init__.py in deserialize(config, custom_objects)
166 module_objects=globs,
167 custom_objects=custom_objects,
--> 168 printable_module_name='layer')
/usr/local/lib/python3.6/dist-packages/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
147 list(custom_objects.items())))
148 with CustomObjectScope(custom_objects):
--> 149 return cls.from_config(config['config'])
150 else:
151 # Then `cls` may be a function returning a class.
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in from_config(cls, config)
1101 A layer instance.
1102 """
-> 1103 return cls(**config)
1104
1105 def count_params(self):
/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/usr/local/lib/python3.6/dist-packages/keras/engine/input_layer.py in __init__(self, input_shape, batch_size, batch_input_shape, dtype, input_tensor, sparse, name)
85 dtype=dtype,
86 sparse=self.sparse,
---> 87 name=self.name)
88 else:
89 self.is_placeholder = False
/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py in placeholder(shape, ndim, dtype, sparse, name)
539 x = tf.sparse_placeholder(dtype, shape=shape, name=name)
540 else:
--> 541 x = tf.placeholder(dtype, shape=shape, name=name)
542 x._keras_shape = shape
543 x._uses_learning_phase = False
AttributeError: module 'tensorflow' has no attribute 'placeholder'

Please provide the whole error log, not just a part of it.
If it's really an error caused by saving, then what about model.save('model.h5')? That is, not using the function from tf.keras.models, but calling the method on the Model instance itself.
But why this code?
with tf.compat.v1.Session() as session:
    K.set_session(session)
    session.run(tf.compat.v1.global_variables_initializer())
    session.run(tf.compat.v1.tables_initializer())
I believe you could just call model.fit right away. Your tf is version 2, right? Why call compat.v1?
TensorFlow 2 doesn't have tf.placeholder, so I suspect that's what this is about.
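All the frames in the log above go through standalone keras (/usr/local/lib/python3.6/dist-packages/keras/...), whose backend calls tf.placeholder directly and therefore can't run on TF2. An untested sketch of what I would try first: load with tf.keras instead, registering the Lambda's function as a custom object:

# Untested sketch: use tf.keras's loader rather than standalone keras, and
# register the Lambda's function so deserialization can resolve it.
import tensorflow as tf

model2 = tf.keras.models.load_model(
    'testest.h5',
    custom_objects={'UniversalEmbedding': UniversalEmbedding})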

It worked fine with TensorFlow 1.15.
Looking forward to tf-hub becoming fully compatible with TensorFlow 2.0 and Keras...
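For reference, a sketch of the save/load round trip under 1.15 (the file name is illustrative, and the custom_objects mapping is my assumption for the Lambda layers):

# Sketch, assuming TF 1.15: save the whole model, then restore it in a fresh
# session. The Lambda's function is supplied as a custom object, and the hub
# table initializer still has to run after loading.
with tf.compat.v1.Session() as session:
    K.set_session(session)
    session.run(tf.compat.v1.global_variables_initializer())
    session.run(tf.compat.v1.tables_initializer())
    tf.keras.models.save_model(model, 'use_dual_encoder.h5')

with tf.compat.v1.Session() as session:
    K.set_session(session)
    model2 = tf.keras.models.load_model(
        'use_dual_encoder.h5',
        custom_objects={'UniversalEmbedding': UniversalEmbedding})
    session.run(tf.compat.v1.tables_initializer())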

Related

AttributeError: 'numpy.ndarray' object has no attribute 'op'

I have time series data and I am trying to build and train an LSTM model on it. I have 1 input and 1 output corresponding to my model. I am trying to build a many-to-many model where the input length is exactly equal to the output length.
The shape of my input is
print(np.shape(X))
(1700,70,401)
#(examples, Timestep, Features)
The shape of my output is
print(np.shape(Y_1))
(1700,70,3)
#(examples, Timestep, Features)
When I approach this problem via the Sequential API, everything runs fine.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

model = Sequential()
model.add(LSTM(32, input_shape=(70, 401), return_sequences=True))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss=tf.keras.losses.CategoricalCrossentropy())
model.fit(X, Y_1, epochs=2, verbose=1)
But when I approach it via the functional API, it shows the error
AttributeError: 'numpy.ndarray' object has no attribute 'op'
input_layer = Input(shape=(70,401))
hidden = LSTM(32,return_sequences=True)(input_layer)
output_1 = Dense(3, activation='softmax')(hidden)
# output_2 = Dense(np.shape(Y_2)[2], activation='softmax')(hidden)
model_lstm = Model(inputs=X, outputs = Y_1)
My question is: how do I resolve this error?
I cannot use the Sequential API to solve the problem because I want to train with multiple outputs, i.e. I have 2 different outputs I want to train on (but for the scope of this question, let's just assume I have one set of inputs and one set of outputs)!!
The entire error that I am getting is:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-66-df3a5a1656f0> in <module>
----> 1 model_lstm = Model(X, Y_1)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in __init__(self, *args, **kwargs)
144
145 def __init__(self, *args, **kwargs):
--> 146 super(Model, self).__init__(*args, **kwargs)
147 _keras_api_gauge.get_cell('model').set(True)
148 # initializing _distribution_strategy here since it is possible to call
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in __init__(self, *args, **kwargs)
165 'inputs' in kwargs and 'outputs' in kwargs):
166 # Graph network
--> 167 self._init_graph_network(*args, **kwargs)
168 else:
169 # Subclassed network
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in _init_graph_network(self, inputs, outputs, name, **kwargs)
268
269 if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
--> 270 base_layer_utils.create_keras_history(self._nested_outputs)
271
272 self._base_init(name=name, **kwargs)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in create_keras_history(tensors)
182 keras_tensors: The Tensors found that came from a Keras Layer.
183 """
--> 184 _, created_layers = _create_keras_history_helper(tensors, set(), [])
185 return created_layers
186
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in _create_keras_history_helper(tensors, processed_ops, created_layers)
208 if getattr(tensor, '_keras_history', None) is not None:
209 continue
--> 210 op = tensor.op # The Op that created this Tensor.
211 if op not in processed_ops:
212 # Recursively set `_keras_history`.
AttributeError: 'numpy.ndarray' object has no attribute 'op'
Update
I tried casting X and Y_1 to tensor objects as suggested in the comments. It works perfectly with the Sequential API but still fails with the functional API.
X_tensor = tf.convert_to_tensor(X, dtype=tf.float32)
y_tensor=tf.convert_to_tensor(Y_1, dtype=tf.int32)
model_lstm = Model(X_tensor, y_tensor)
Error
AttributeError: Tensor.op is meaningless when eager execution is enabled.
AttributeError Traceback (most recent call last)
<ipython-input-100-d090ea2b5a90> in <module>
----> 1 model_lstm = Model(X_tensor, y_tensor)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in __init__(self, *args, **kwargs)
144
145 def __init__(self, *args, **kwargs):
--> 146 super(Model, self).__init__(*args, **kwargs)
147 _keras_api_gauge.get_cell('model').set(True)
148 # initializing _distribution_strategy here since it is possible to call
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in __init__(self, *args, **kwargs)
165 'inputs' in kwargs and 'outputs' in kwargs):
166 # Graph network
--> 167 self._init_graph_network(*args, **kwargs)
168 else:
169 # Subclassed network
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in _init_graph_network(self, inputs, outputs, name, **kwargs)
268
269 if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
--> 270 base_layer_utils.create_keras_history(self._nested_outputs)
271
272 self._base_init(name=name, **kwargs)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in create_keras_history(tensors)
182 keras_tensors: The Tensors found that came from a Keras Layer.
183 """
--> 184 _, created_layers = _create_keras_history_helper(tensors, set(), [])
185 return created_layers
186
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in _create_keras_history_helper(tensors, processed_ops, created_layers)
208 if getattr(tensor, '_keras_history', None) is not None:
209 continue
--> 210 op = tensor.op # The Op that created this Tensor.
211 if op not in processed_ops:
212 # Recursively set `_keras_history`.
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py in op(self)
1078 def op(self):
1079 raise AttributeError(
-> 1080 "Tensor.op is meaningless when eager execution is enabled.")
1081
1082 #property
AttributeError: Tensor.op is meaningless when eager execution is enabled.
I made a mistake in the code itself, in the Model(...) call of the functional API version:
model_lstm = Model(inputs=X, outputs=Y_1)
I passed the data arrays into this call, but this part only defines what the model is going to be: it should name the input layer and the output layer of the graph, not the training data. The input layer when constructing my model is input_layer in the code, and the output layer is output_1. So the code should be
model_lstm = Model(inputs=input_layer, outputs=output_1)
and after that we can do
model_lstm.fit(X, Y_1)
This will work perfectly fine now!!
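Putting it together for the multi-output case mentioned in the question (a sketch; the second head is illustrative, since the shape of Y_2 isn't given):

import tensorflow as tf
from tensorflow.keras.layers import Input, LSTM, Dense
from tensorflow.keras.models import Model

input_layer = Input(shape=(70, 401))
hidden = LSTM(32, return_sequences=True)(input_layer)
output_1 = Dense(3, activation='softmax', name='out_1')(hidden)
# Hypothetical second head, mirroring the commented-out line in the question;
# its width would be np.shape(Y_2)[2] in the real code.
output_2 = Dense(3, activation='softmax', name='out_2')(hidden)

# The Model is wired from symbolic layers, never from the numpy arrays.
model_lstm = Model(inputs=input_layer, outputs=[output_1, output_2])
model_lstm.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
                   loss=tf.keras.losses.CategoricalCrossentropy())

# The data only enters at training time:
# model_lstm.fit(X, [Y_1, Y_2], epochs=2, verbose=1)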

tensorflow model with keras and tensorflow_addons layer is not getting loaded

I have trained a model with keras layers and the weight_normalization layer from tensorflow_addons. This is the model I trained and saved in the TensorFlow SavedModel format:
import tensorflow as tf
import tensorflow.keras as tk
import tensorflow_addons as tfa
model = tf.keras.Sequential([
    tf.keras.layers.Input((X_train.shape[1]-1,)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.2),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(2048, activation="relu")),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.5),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(1048, activation="relu")),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.5),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(206, activation="sigmoid")),
])
(and it has no custom metrics)
from keras.callbacks import ModelCheckpoint, EarlyStopping

# autosave best model
best_model = ModelCheckpoint("model", monitor='val_accuracy', mode='max',
                             verbose=0, save_best_only=True)
earlystop = EarlyStopping(monitor='val_accuracy',
                          patience=15,
                          mode='max',
                          verbose=1,
                          restore_best_weights=True)
callbacks = [best_model, earlystop]

model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
history = model.fit(X_res, y_res, epochs=100, verbose=2,
                    validation_data=(X_val[X_val.columns[1:]], y_val[y_val.columns[1:]]),
                    callbacks=callbacks)
But when I load the model it returns an error:
model = tk.models.load_model("../input/model")
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input> in <module>
2     return
3
----> 4 model = tk.models.load_model("../input/model-custom", custom_objects={'__inference_dense_layer_call_fn_1126407':f1})
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/saving/save.py
in load_model(filepath, custom_objects, compile, options)
185 if isinstance(filepath, six.string_types):
186 loader_impl.parse_saved_model(filepath)
--> 187 return saved_model_load.load(filepath, compile, options)
188
189 raise IOError(
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/saving/saved_model/load.py
in load(path, compile, options)
119
120 model = tf_load.load_internal(
--> 121 path, options=options, loader_cls=KerasObjectLoader)
122
123 # pylint: disable=protected-access
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py
in load_internal(export_dir, tags, options, loader_cls)
631 try:
632 loader = loader_cls(object_graph_proto, saved_model_proto, export_dir,
--> 633 ckpt_options)
634 except errors.NotFoundError as err:
635 raise FileNotFoundError(
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/saving/saved_model/load.py
in __init__(self, *args, **kwargs)
192 self._models_to_reconstruct = []
193
--> 194 super(KerasObjectLoader, self).__init__(*args, **kwargs)
195
196 # Now that the node object has been fully loaded, and the checkpoint has
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py
in __init__(self, object_graph_proto, saved_model_proto, export_dir,
ckpt_options)
128 self._concrete_functions[name] = _WrapperFunction(concrete_function)
129
--> 130 self._load_all()
131 self._restore_checkpoint()
132
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/saving/saved_model/load.py
in _load_all(self)
216
217 # Load all other nodes and functions.
--> 218 super(KerasObjectLoader, self)._load_all()
219
220 # Finish setting up layers and models. See function docstring for more info.
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py
in _load_all(self)
139 def _load_all(self):
140 """Loads all nodes and functions from the SavedModel and their edges."""
--> 141 self._load_nodes()
142 self._load_edges()
143 # TODO(b/124045874): There are limitations with functions whose captures
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py
in _load_nodes(self)
281 # interface.
282 continue
--> 283 node, setter = self._recreate(proto, node_id)
284 nodes[node_id] = node
285 node_setters[node_id] = setter
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/saving/saved_model/load.py
in _recreate(self, proto, node_id)
237 obj._handle_name = proto.variable.name + ':0' # pylint: disable=protected-access
238 else:
--> 239 obj, setter = super(KerasObjectLoader, self)._recreate(proto, node_id)
240 return obj, setter
241
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py
in _recreate(self, proto, node_id)
391 if kind not in factory:
392 raise ValueError("Unknown SavedObject type: %r" % kind)
--> 393 return factory[kind]()
394
395 def _recreate_user_object(self, proto, node_id):
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py
in <lambda>()
380 lambda: self._recreate_user_object(proto.user_object, node_id)),
381 "asset": lambda: self._recreate_asset(proto.asset),
--> 382 "function": lambda: self._recreate_function(proto.function),
383 "bare_concrete_function": functools.partial(
384 self._recreate_bare_concrete_function,
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py
in _recreate_function(self, proto)
419 def _recreate_function(self, proto):
420 return function_deserialization.recreate_function(
--> 421 proto, self._concrete_functions), setattr
422
423 def _recreate_bare_concrete_function(self, proto):
/opt/conda/lib/python3.7/site-packages/tensorflow/python/saved_model/function_deserialization.py
in recreate_function(saved_function, concrete_functions)
259 concrete_function_objects = []
260 for concrete_function_name in saved_function.concrete_functions:
--> 261 concrete_function_objects.append(concrete_functions[concrete_function_name])
262
263 for cf in concrete_function_objects:
KeyError: '__inference_dense_layer_call_fn_1126407'
Can you please help me load the model correctly? Thanks!
I suspect that you have both keras and tensorflow installed separately; I have worked with tfa and never had problems with this kind of loading issue.
In fact, here you import everything via tensorflow:
import tensorflow as tf
import tensorflow.keras as tk
import tensorflow_addons as tfa
But here you load the callbacks via plain keras:
from keras.callbacks import ModelCheckpoint, EarlyStopping
To first confirm that you really do have a model-loading problem, change every import to go through tensorflow.keras (I expect the problem to disappear altogether once you do this).
Replace
from keras.callbacks import ModelCheckpoint, EarlyStopping
with:
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
To sum up: retrain from scratch with the new imports (all from tensorflow.keras) and then check whether the problem is reproduced.
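For clarity, here is a condensed sketch of the retraining setup with consistent imports (n_features stands in for X_train.shape[1] - 1; everything else is as in the question):

import tensorflow as tf
import tensorflow_addons as tfa
# Callbacks now come from tensorflow.keras, matching the other imports.
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

n_features = X_train.shape[1] - 1

model = tf.keras.Sequential([
    tf.keras.layers.Input((n_features,)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.2),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(2048, activation="relu")),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.5),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(1048, activation="relu")),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.5),
    tfa.layers.WeightNormalization(tf.keras.layers.Dense(206, activation="sigmoid")),
])
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])

callbacks = [
    ModelCheckpoint("model", monitor='val_accuracy', mode='max',
                    verbose=0, save_best_only=True),
    EarlyStopping(monitor='val_accuracy', patience=15, mode='max',
                  verbose=1, restore_best_weights=True),
]
# ...model.fit(...) exactly as before, after which loading should work:
# model = tf.keras.models.load_model("model")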

tf.keras.estimator.model_to_estimator failing to convert keras model with custom and Lambda layers

I wrote a model some time ago that uses a few custom layer definitions and was trained using TF 1.12 and standalone Keras 2.2.4. I've updated TF to 1.14 and switched over to tf.keras. Using a custom load function, my model builds, loads weights, and generates predictions.
Now I'm trying to convert my Keras model to a TF Estimator that I could use for inference, and I'm having all sorts of issues. I believe it stems from the get_config() method in my Lambda layers. I currently define them like this:
class NamedLambda(Lambda):
    def __init__(self, name=None):
        Lambda.__init__(self, self.fn, name=name)

    @classmethod
    def invoke(cls, args, **kw):
        return cls(**kw)(args)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.name)

class L2Normalize(NamedLambda):
    def fn(self, x):
        return K.l2_normalize(x, axis=-1)
When I check, the get_config method is working just fine:
custom_objects['l2_normalize'].get_config()
{'arguments': DictWrapper({}),
'dtype': 'float32',
'function': 'fn',
'function_type': 'function',
'module': 'grademachine.utils',
'name': 'l2_normalize',
'output_shape': None,
'output_shape_module': None,
'output_shape_type': 'raw',
'trainable': True}
Below is some example code and the traceback that has me stumped. Any help would be much appreciated.
Python version: 3.6.2
TensorFlow version: 1.14.0
Keras version: 2.2.4-tf
model = load_model(model_dir,
                   options_fn='model123_options',
                   weights_fn='model123_weights')
model
<tensorflow.python.keras.engine.training.Model at 0x7fe3d43d8e10>
est = tf.keras.estimator.model_to_estimator(keras_model=model)
I've also tried adding my custom layers as follows, which yields a slightly different traceback, but ultimately ends up in the same place. The traceback below is from the version with custom_objects defined:
# custom_layer_names is a list of names of each of the custom layers in the trained model
custom_objects = {l.name: l for l in model.layers if l.name in custom_layer_names}
est = tf.keras.estimator.model_to_estimator(keras_model=model,
                                            custom_objects=custom_objects)
INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpyujm6s99
INFO:tensorflow:Using the Keras model provided.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-512a382c338c> in <module>()
13 est = tf.keras.estimator.model_to_estimator(keras_model=model,
14 model_dir='saved_estimator/',
---> 15 custom_objects=custom_objects)
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow/python/keras/estimator/__init__.py in model_to_estimator(keras_model, keras_model_path, custom_objects, model_dir, config)
71 custom_objects=custom_objects,
72 model_dir=model_dir,
---> 73 config=config)
74
75 # LINT.ThenChange(//tensorflow_estimator/python/estimator/keras.py)
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/keras.py in model_to_estimator(keras_model, keras_model_path, custom_objects, model_dir, config)
448 if keras_model._is_graph_network:
449 warm_start_path = _save_first_checkpoint(keras_model, custom_objects,
--> 450 config)
451 elif keras_model.built:
452 logging.warning('You are creating an Estimator from a Keras model manually '
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/keras.py in _save_first_checkpoint(keras_model, custom_objects, config)
316 training_util.create_global_step()
317 model = _clone_and_build_model(ModeKeys.TRAIN, keras_model,
--> 318 custom_objects)
319 # save to checkpoint
320 with session.Session(config=config.session_config) as sess:
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/keras.py in _clone_and_build_model(mode, keras_model, custom_objects, features, labels)
199 compile_clone=compile_clone,
200 in_place_reset=(not keras_model._is_graph_network),
--> 201 optimizer_iterations=global_step)
202
203 return clone
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow/python/keras/models.py in clone_and_build_model(model, input_tensors, target_tensors, custom_objects, compile_clone, in_place_reset, optimizer_iterations, optimizer_config)
534 if custom_objects:
535 with CustomObjectScope(custom_objects):
--> 536 clone = clone_model(model, input_tensors=input_tensors)
537 else:
538 clone = clone_model(model, input_tensors=input_tensors)
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow/python/keras/models.py in clone_model(model, input_tensors, clone_function)
324 else:
325 return _clone_functional_model(
--> 326 model, input_tensors=input_tensors, layer_fn=clone_function)
327
328
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow/python/keras/models.py in _clone_functional_model(model, input_tensors, layer_fn)
152 # Get or create layer.
153 if layer not in layer_map:
--> 154 new_layer = layer_fn(layer)
155 layer_map[layer] = new_layer
156 layer = new_layer
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow/python/keras/models.py in _clone_layer(layer)
52
53 def _clone_layer(layer):
---> 54 return layer.__class__.from_config(layer.get_config())
55
56
~/repos/grademachine/grademachine/utils.py in from_config(cls, config, custom_objects)
850 config = config.copy()
851 function = cls._parse_function_from_config(
--> 852 config, custom_objects, 'function', 'module', 'function_type')
853
854 output_shape = cls._parse_function_from_config(
~/repos/grademachine/grademachine/utils.py in _parse_function_from_config(cls, config, custom_objects, func_attr_name, module_attr_name, func_type_attr_name)
898 config[func_attr_name],
899 custom_objects=custom_objects,
--> 900 printable_module_name='function in Lambda layer')
901 elif function_type == 'lambda':
902 # Unsafe deserialization from bytecode
~/anaconda2/envs/berttf114/lib/python3.6/site-packages/tensorflow/python/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
207 obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
208 else:
--> 209 obj = module_objects.get(object_name)
210 if obj is None:
211 raise ValueError('Unknown ' + printable_module_name + ':' + object_name)
AttributeError: 'NoneType' object has no attribute 'get'
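One observation from the last frames, plus an untested guess: deserialize_keras_object is handed the function name 'fn' (note 'function': 'fn' in the get_config() output above) with module_objects=None, which is exactly where the NoneType.get crash happens. Registering the function itself under that stored name might get past it:

from tensorflow.keras import backend as K

# Untested sketch: key the *function* by the name stored in the Lambda config,
# alongside the custom layers already being passed in.
custom_objects = {l.name: l for l in model.layers if l.name in custom_layer_names}
custom_objects['fn'] = lambda x: K.l2_normalize(x, axis=-1)

est = tf.keras.estimator.model_to_estimator(keras_model=model,
                                            custom_objects=custom_objects)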

Initializing RNNCell

I'm having issues with using any child class of TensorFlow's RNNCell. According to the TensorFlow source, the state for any RNNCell should be a tuple; however, when I give it a tuple, it throws an error saying that it's trying to read the ndims attribute of the state tuple, which doesn't exist.
I am trying to create an LSTM where I can control each individual input.
This is the simplest code I can make, and even that has the same issue, so I hope I am doing something wrong that can easily be fixed. Here is the simple code:
lstm_layer = tf.contrib.rnn.LSTMCell(num_units=64)
initial_state = lstm_layer.zero_state(batch_size=1, dtype=tf.float32)
initial_input = np.expand_dims(np.array([1, 2, 3, 4, 5, 6, 7, 8]), 0)
output_single, state_single = lstm_layer(inputs=initial_input, state=initial_state)
Here is the error I get:
AttributeError Traceback (most recent call last)
<ipython-input-22-1dcce10906e5> in <module>
2 initial_state = lstm_layer.zero_state(batch_size=1,dtype=tf.float32)
3 initial_input = np.expand_dims(np.array([1,2,3,4,5,6,7,8]),0)
----> 4 output_single, state_single = lstm_layer(inputs=initial_input,state=initial_state)
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/rnn_cell_impl.py in __call__(self, inputs, state, scope, *args, **kwargs)
369 # method. See the class docstring for more details.
370 return base_layer.Layer.__call__(self, inputs, state, scope=scope,
--> 371 *args, **kwargs)
372
373
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/layers/base.py in __call__(self, inputs, *args, **kwargs)
528
529 # Actually call layer
--> 530 outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
531
532 if not context.executing_eagerly():
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
536 if not self.built:
537 # Build layer if applicable (if the `build` method has been overridden).
--> 538 self._maybe_build(inputs)
539 # We must set self.built since user defined build functions are not
540 # constrained to set self.built.
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in _maybe_build(self, inputs)
1589 # Check input assumptions set before layer building, e.g. input rank.
1590 input_spec.assert_input_compatibility(
-> 1591 self.input_spec, inputs, self.name)
1592 input_list = nest.flatten(inputs)
1593 if input_list and self._dtype is None:
~/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
107 spec.min_ndim is not None or
108 spec.max_ndim is not None):
--> 109 if x.shape.ndims is None:
110 raise ValueError('Input ' + str(input_index) + ' of layer ' +
111 layer_name + ' is incompatible with the layer: '
AttributeError: 'tuple' object has no attribute 'ndims'
It looks as though there is some base method that gets called for all layers in the Keras API, but it doesn't work with tuples. However, I find it strange that this would be an issue nobody has ever run into before, so I hope it's just me making a mistake.
I figured out the issue. TensorFlow wasn't playing nicely with numpy in this case. Instead of
initial_input = np.expand_dims(np.array([1,2,3,4,5,6,7,8]),0)
I needed to give it
initial_input = tf.expand_dims(np.array([1,2,3,4,5,6,7,8],dtype=np.float32),0)
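Put together, the full snippet with that fix (TF 1.x, where tf.contrib is still available):

import numpy as np
import tensorflow as tf

lstm_layer = tf.contrib.rnn.LSTMCell(num_units=64)
initial_state = lstm_layer.zero_state(batch_size=1, dtype=tf.float32)

# tf.expand_dims returns a Tensor whose shape has .ndims, which is what the
# input-spec check in base_layer expects; a raw numpy array does not have it.
initial_input = tf.expand_dims(np.array([1, 2, 3, 4, 5, 6, 7, 8],
                                        dtype=np.float32), 0)

output_single, state_single = lstm_layer(inputs=initial_input, state=initial_state)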

Input tensors to a Model must come from `tf.layers.Input` when I concatenate two models with Keras API on Tensorflow

I'm creating a wide-and-deep model using the Keras functional API on TensorFlow.
When I try to merge the two models, the error below occurs:
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input> in <module>()
1 merged_out = tf.keras.layers.concatenate([wide_model.output, deep_model.output])
2 merged_out = tf.keras.layers.Dense(1)(merged_out)
----> 3 combined_model = tf.keras.Model(inputs=wide_model.input + [deep_model.input], outputs=merged_out)
4 print(combined_model.summary())
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py
in __init__(self, *args, **kwargs)
111
112 def __init__(self, *args, **kwargs):
--> 113 super(Model, self).__init__(*args, **kwargs)
114 # Create a cache for iterator get_next op.
115 self._iterator_get_next = weakref.WeakKeyDictionary()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py
in __init__(self, *args, **kwargs)
77 'inputs' in kwargs and 'outputs' in kwargs):
78 # Graph network
---> 79 self._init_graph_network(*args, **kwargs)
80 else:
81 # Subclassed network
/usr/local/lib/python3.6/dist-packages/tensorflow/python/training/checkpointable/base.py
in _method_wrapper(self, *args, **kwargs)
362 self._setattr_tracking = False # pylint: disable=protected-access
363 try:
--> 364 method(self, *args, **kwargs)
365 finally:
366 self._setattr_tracking = previous_value # pylint: disable=protected-access
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py
in _init_graph_network(self, inputs, outputs, name)
193 'must come from tf.layers.Input. '
194 'Received: ' + str(x) +
--> 195 ' (missing previous layer metadata).')
196 # Check that x is an input tensor.
197 # pylint: disable=protected-access
ValueError: Input tensors to a Model must come from tf.layers.Input.
Received: Tensor("add_1:0", shape=(1, ?, 163), dtype=float32) (missing
previous layer metadata).
Here is the code for concatenating the two.
merged_out = tf.keras.layers.concatenate([wide_model.output, deep_model.output])
merged_out = tf.keras.layers.Dense(1)(merged_out)
combined_model = tf.keras.Model(inputs=wide_model.input + [deep_model.input], outputs=merged_out)
print(combined_model.summary())
For each model's inputs, I tried using tf.layers.Input with
inputs = tf.placeholder(tf.float32, shape=(None,X_resampled.shape[1]))
deep_inputs = tf.keras.Input(tensor=(inputs))
to make them tf.layers.Input as this page mentions.
But I'm still facing the same issue.
I'm using tensorflow==1.10.0
Could someone help me solve this issue?
Thanks!
In inputs=wide_model.input + [deep_model.input], wide_model.input is probably not a list, so you are passing a new Add tensor instead of a list of inputs. Try passing inputs=[wide_model.input] + [deep_model.input] instead.
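A sketch of the whole merge with that change (model names as in the question):

import tensorflow as tf

merged_out = tf.keras.layers.concatenate([wide_model.output, deep_model.output])
merged_out = tf.keras.layers.Dense(1)(merged_out)

# [wide_model.input] + [deep_model.input] is plain list concatenation, whereas
# wide_model.input + [deep_model.input] built an elementwise Add tensor
# (the "add_1:0" seen in the error).
combined_model = tf.keras.Model(inputs=[wide_model.input] + [deep_model.input],
                                outputs=merged_out)
print(combined_model.summary())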