Unable to import a pretrained model after calling Keras.backend.clear_session() - tensorflow

I am trying to train a model on new data samples in each iteration of a loop in Keras (using the TensorFlow backend). Due to a GPU memory error after some iterations, I appended K.clear_session() at the end of each iteration. However, after one iteration, the code throws the error:
'Cannot interpret feed_dict key as Tensor: ' + e.args[0])
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder:0", shape=(7, 7, 3, 64), dtype=float32) is not an element of this graph.
If I remove K.clear_session() at the end, there is no error. Can anyone explain why this error appears in the second iteration?
I tried other methods to release GPU memory, but none of them worked, and this was my last option. I have pasted example code that reproduces the error. Please note that this is not my actual code; it is just a minimal example that reproduces the error I am facing in the real code.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import random
import numpy as np
import tensorflow as tf
import keras
from keras import backend as K
from keras.datasets import cifar10

seed_value = 0
os.environ['PYTHONHASHSEED'] = str(seed_value)
random.seed(seed_value)
np.random.seed(seed_value)

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# One-hot encode the labels once, outside the loop
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

for i in range(3):
    base_model = tf.keras.applications.resnet50.ResNet50(weights='imagenet',
                                                         input_shape=(32, 32, 3),
                                                         include_top=False)
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    output = tf.keras.layers.Dense(10, activation='softmax',
                                   kernel_initializer=tf.keras.initializers.RandomNormal(seed=4))(x)
    model = tf.keras.Model(inputs=base_model.input, outputs=output)
    for layer in base_model.layers:
        layer.trainable = False
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=1024, epochs=1, verbose=1)
    K.clear_session()
Traceback (most recent call last):
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1092, in _run
subfeed, allow_tensor=True, allow_operation=False)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 3490, in as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 3569, in _as_graph_element_locked
raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("Placeholder:0", shape=(7, 7, 3, 64), dtype=float32) is not an element of this graph.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "E:/codes/experiments-AL/breakhis/40X-M-B/codes-AL/error_debug.py", line 22, in <module>
include_top=False)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\applications\__init__.py", line 70, in wrapper
return base_fun(*args, **kwargs)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\applications\resnet50.py", line 32, in ResNet50
return resnet50.ResNet50(*args, **kwargs)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\keras_applications\resnet50.py", line 291, in ResNet50
model.load_weights(weights_path)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\engine\network.py", line 1544, in load_weights
saving.load_weights_from_hdf5_group(f, self.layers)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\engine\saving.py", line 806, in load_weights_from_hdf5_group
K.batch_set_value(weight_value_tuples)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\backend.py", line 2784, in batch_set_value
get_session().run(assign_ops, feed_dict=feed_dict)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 929, in run
run_metadata_ptr)
File "C:\Users\sirshad\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1095, in _run
'Cannot interpret feed_dict key as Tensor: ' + e.args[0])
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder:0", shape=(7, 7, 3, 64), dtype=float32) is not an element of this graph.
Process finished with exit code 1

I was able to overcome this issue by saving the ImageNet pre-trained model to disk once and then loading it from the file in each iteration after calling tf.keras.backend.clear_session(). So saving the base model to a file and then loading it works. But I am still confused why it did not work when creating the model directly with
base_model = tf.keras.applications.resnet50.ResNet50
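A minimal sketch of that workaround (the file name base_model.h5 is my choice, not from the original code):
import tensorflow as tf
# Build and save the ImageNet-pretrained base once, before the loop
base = tf.keras.applications.resnet50.ResNet50(weights='imagenet',
                                               input_shape=(32, 32, 3),
                                               include_top=False)
base.save('base_model.h5')
for i in range(3):
    # Load a fresh copy from disk instead of rebuilding from the downloaded weights
    base_model = tf.keras.models.load_model('base_model.h5')
    # ... attach the classification head, compile, and fit as before ...
    tf.keras.backend.clear_session()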

Related

Multiple inputs and one output in TensorFlow

I am very new to TF. I would like to build a model with 6 inputs and 1 output. My code is below.
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras import layers
import matplotlib.pyplot as plt
abalone_train = pd.read_csv("sipm2.csv",
                            names=["Hit1", "Time1", "Hit2", "Time2", "Hit3",
                                   "Time3", "PosX"])
abalone_train.head()
abalone_features = abalone_train.copy()
abalone_labels = abalone_features.pop('PosX')
abalone_features = np.array(abalone_features)
print(abalone_features)
abalone_model = tf.keras.Sequential([
    layers.Dense(64),
    layers.Dense(64)
])
abalone_model.compile(loss='mean_squared_error',
                      optimizer=tf.keras.optimizers.Adam(0.1),
                      metrics=['mean_squared_error'])
history = abalone_model.fit(abalone_features, abalone_labels, epochs=100)
abalone_model.summary()
print("Finished training the model")
#plt.xlabel('Epoch Number')
#plt.ylabel("Loss Magnitude")
#plt.plot(history.history['loss'])
#plt.show()
posx_prediction = abalone_model.predict([75., 1., 75., 1.3, 66., 2.])
print(posx_prediction)
The error I got is:
WARNING:tensorflow:Model was constructed with shape (None, 6) for input KerasTensor(type_spec=TensorSpec(shape=(None, 6), dtype=tf.float32, name='dense_input'), name='dense_input', description="created by layer 'dense_input'"), but it was called on an input with incompatible shape (None,).
Traceback (most recent call last):
File "/Users/sertac/Work/TensorFlow/test3.py", line 39, in <module>
posx_prediction = abalone_model.predict([75., 1., 75. ,1.3 ,66. ,2.])
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/tensorflow/python/framework/func_graph.py", line 1147, in autograph_handler
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/keras/engine/training.py", line 1801, in predict_function *
return step_function(self, iterator)
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/keras/engine/training.py", line 1790, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/keras/engine/training.py", line 1783, in run_step **
outputs = model.predict_step(data)
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/keras/engine/training.py", line 1751, in predict_step
return self(x, training=False)
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/Users/sertac/Library/Python/3.9/lib/python/site-packages/keras/engine/input_spec.py", line 228, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" '
ValueError: Exception encountered when calling layer "sequential" (type Sequential).
Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (None,)
Call arguments received:
• inputs=tf.Tensor(shape=(None,), dtype=float32)
• training=False
• mask=None
I would appreciate it if you could help me get a prediction.
Thanks in advance.
Sipm2.csv
104.241,1.12209,67.51,1.30428,57.354,1.48099,3.9
104.796,1.12425,67.787,1.3103,58.31,1.47481,3.5
103.203,1.10605,67.039,1.29599,56.825,1.47089,3.0
84.857,1.12028,80.898,1.23234,62.08,1.39148,2.5
86.015,1.16361,80.656,1.20233,62.945,1.37363,1.5
83.987,1.20226,80.211,1.17464,62.002,1.36756,1.0
85.35,1.21761,81.777,1.15021,62.541,1.34058,0.5
For 6 inputs and 1 output, your model should look something like the one below.
abalone_model = tf.keras.Sequential([
    tf.keras.Input(shape=(6,)),
    layers.Dense(64),
    layers.Dense(1)  # single linear output for a regression target like PosX
])
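Note that model.predict also expects a batch: a flat list of 6 floats is interpreted as 6 scalar samples (shape (None,)), which is exactly what the ndim=1 error above complains about. A sketch of a correct call:
import numpy as np
# One sample with 6 features -> shape (1, 6)
posx_prediction = abalone_model.predict(np.array([[75., 1., 75., 1.3, 66., 2.]]))
print(posx_prediction)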

L2-normalization with Keras Backend?

I'd like to normalize the inputs going into my neural network, but I'm defining my model this way:
df = pd.read_csv(r'C:\Users\Davide Mori\PycharmProjects\pythonProject\Dataset.csv')
print(df)
target_column = ['W_mag', 'W_phase']
predictors = list(set(list(df.columns)) - set(target_column))
X = df[predictors].values
Y = df[target_column].values
def get_model(n_inputs, n_outputs):
    model = Sequential()
    model.add(Dense(1000, input_dim=n_inputs, activation='relu'))
    #model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
    model.add(Dense(1000, activation='linear',
                    activity_regularizer=regularizers.l1(0.0001)))
    model.add(Activation('relu'))
    model.add(Dense(n_outputs, activation='linear'))
    model.compile(optimizer="adam", loss="mean_squared_error",
                  metrics=["mean_squared_error"])
    model.summary()
    return model
n_inputs, n_outputs = X.shape[1], Y.shape[1]
model = get_model(n_inputs, n_outputs)
# fit the model on all data
model.fit(X, Y, epochs=100, batch_size=1)
How do I apply the Lambda layer to my inputs? Isn't the commented line in the wrong position? If I put the Lambda layer there, I'm normalizing what has already been "transformed" by the first hidden layer, right? How can I solve this problem?
This is the error I get when I put the Lambda layer before everything else:
2020-10-12 15:08:46.036872: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "C:\Program Files\JetBrains\PyCharm 2020.2.2\plugins\python\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
File "C:\Program Files\JetBrains\PyCharm 2020.2.2\plugins\python\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "C:/Users/Davide Mori/PycharmProjects/pythonProject/prova_rete_sfs.py", line 60, in <module>
model = get_model(n_inputs, n_outputs)
File "C:/Users/Davide Mori/PycharmProjects/pythonProject/prova_rete_sfs.py", line 52, in get_model
model.summary()
File "C:\Users\Davide Mori\Anaconda3\envs\pythonProject\lib\site-packages\tensorflow_core\python\keras\engine\network.py", line 1302, in summary
raise ValueError('This model has not yet been built. '
ValueError: This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.
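The error itself points at the fix: the first layer needs an input_shape so Keras can build the model. A minimal sketch of get_model with the Lambda normalization placed before the first Dense layer (assuming row-wise L2 normalization of the raw inputs is the intent):
from keras.models import Sequential
from keras.layers import Dense, Activation, Lambda
from keras import backend as K
from keras import regularizers
def get_model(n_inputs, n_outputs):
    model = Sequential()
    # Normalize the raw inputs; input_shape lets Keras build the model immediately
    model.add(Lambda(lambda x: K.l2_normalize(x, axis=1),
                     input_shape=(n_inputs,)))
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(1000, activation='linear',
                    activity_regularizer=regularizers.l1(0.0001)))
    model.add(Activation('relu'))
    model.add(Dense(n_outputs, activation='linear'))
    model.compile(optimizer="adam", loss="mean_squared_error",
                  metrics=["mean_squared_error"])
    model.summary()
    return model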

Tensorflow cannot quantize reshape function

I want to train my model with quantization-aware training. However, when I try, tensorflow_model_optimization cannot quantize the tf.reshape function and throws an error.
tensorflow version: '2.4.0-dev20200903'
python version: 3.6.9
The code:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
from tensorflow.keras.applications import VGG16
import tensorflow_model_optimization as tfmot
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
quantize_model = tfmot.quantization.keras.quantize_model
inputs = keras.Input(shape=(784,))
# img_inputs = keras.Input(shape=(32, 32, 3))
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
outputs = tf.reshape(outputs, [-1, 2, 5])
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
# keras.utils.plot_model(model, "my_first_model.png")
q_aware_model = quantize_model(model)
and the output:
Traceback (most recent call last):
File "<ipython-input-39-af601b78c010>", line 14, in <module>
q_aware_model = quantize_model(model)
File "/home/essys/.local/lib/python3.6/site-packages/tensorflow_model_optimization/python/core/quantization/keras/quantize.py", line 137, in quantize_model
annotated_model = quantize_annotate_model(to_quantize)
File "/home/essys/.local/lib/python3.6/site-packages/tensorflow_model_optimization/python/core/quantization/keras/quantize.py", line 210, in quantize_annotate_model
to_annotate, input_tensors=None, clone_function=_add_quant_wrapper)
...
File "/home/essys/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/autograph/impl/api.py", line 667, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in user code:
TypeError: tf__call() got an unexpected keyword argument 'shape'
If somebody knows, please help.
The reason is that your layer is not yet supported for QAT at the moment. If you want to quantize it, you have to write your own quantization with quantize_annotate_layer, pass it through quantize_scope, and apply it to your model with quantize_apply, as described here: https://www.tensorflow.org/model_optimization/guide/quantization/training_comprehensive_guide?hl=en#quantize_custom_keras_layer
I have created a batch_norm_layer example here.
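A rough sketch of the selective-annotation workflow (my own illustration, not the answerer's code: I annotate only the Dense layers and swap tf.reshape for the built-in Keras Reshape layer so every node in the graph is a Keras layer):
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from tensorflow import keras
from tensorflow.keras import layers
annotate = tfmot.quantization.keras.quantize_annotate_layer
inputs = keras.Input(shape=(784,))
x = annotate(layers.Dense(64, activation="relu"))(inputs)
x = annotate(layers.Dense(64, activation="relu"))(x)
outputs = annotate(layers.Dense(10))(x)
outputs = layers.Reshape((2, 5))(outputs)  # Keras layer instead of raw tf.reshape
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
# quantize_apply quantizes only the annotated layers and leaves the Reshape alone
q_aware_model = tfmot.quantization.keras.quantize_apply(model)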
TensorFlow 2.x QAT support is not complete for this layer; please consider using TF 1.x and adding FakeQuant ops after operators.
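For the TF 1.x route, a minimal sketch of inserting a fake-quantization op after an operator (the layer and min/max range here are arbitrary illustrations):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
x = tf.placeholder(tf.float32, shape=[None, 784])
logits = tf.layers.dense(x, 10)
# Simulate 8-bit quantization of the activations during training
logits = tf.quantization.fake_quant_with_min_max_args(
    logits, min=-6.0, max=6.0, num_bits=8)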

convLSTM2d w/ functional API

I have an autoencoder for image compression, where the encoded tensor has the shape (batch_size, 12, 64, 48):
batch_size is the number of images being fed in a batch,
12 is the number of channels of this last encoder layer, which has a
64x48 width/height.
I want to input this to a ConvLSTM2D layer, and I would like the output of the ConvLSTM2D to have the same dimensions as its input.
The intention is to see image reconstruction on a video sequence, rather than on unordered images from a dataset.
Placing a ConvLSTM2D between the encoder and decoder in an autoencoder architecture has been difficult, especially because most examples use the Sequential API and I want to use the functional API in Keras.
I tried reshaping the input, but the error persists:
import tensorflow as tf
import tensorflow.keras.backend as K
def LSTM_layer(input):
    input = tf.keras.backend.expand_dims(input, axis=-1)
    lstm1 = tf.keras.layers.ConvLSTM2D(filters=12, kernel_size=(3, 3), strides=(1, 1),
                                       data_format="channels_first",
                                       input_shape=(None, 12, 64, 48),
                                       padding='same', return_sequences=True)(input)
    return lstm1
def build_model(input_shape):
    # create an input with input_shape as the size
    input_ = tf.keras.Input(shape=input_shape, name="input_node")
    lstm_features = LSTM_layer(input_)
    model = tf.keras.Model(inputs=input_, outputs=[lstm_features])
    return model
def main():
    # the size of the tensor output by my encoder, with channels_first assumed
    input_shape = (12, 64, 48)
    model = build_model(input_shape)
if __name__ == '__main__':
    main()
Unfortunately, this is throwing this error:
Traceback (most recent call last):
File "lstm.py", line 29, in <module>
main()
File "lstm.py", line 26, in main
model = build_model(input_shape)
File "lstm.py", line 20, in build_model
model = tf.keras.Model(inputs=input_, outputs=[lstm_features])
File "/home/hallab/.local/lib/python3.5/site-packages/tensorflow/python/keras/engine/training.py", line 121, in __init__
super(Model, self).__init__(*args, **kwargs)
File "/home/hallab/.local/lib/python3.5/site-packages/tensorflow/python/keras/engine/network.py", line 80, in __init__
self._init_graph_network(*args, **kwargs)
File "/home/hallab/.local/lib/python3.5/site-packages/tensorflow/python/training/checkpointable/base.py", line 474, in _method_wrapper
method(self, *args, **kwargs)
File "/home/hallab/.local/lib/python3.5/site-packages/tensorflow/python/keras/engine/network.py", line 224, in _init_graph_network
'(thus holding past layer metadata). Found: ' + str(x))
ValueError: Output tensors to a Model must be the output of a TensorFlow `Layer` (thus holding past layer metadata). Found: Tensor("conv_lst_m2d/transpose_1:0", shape=(?, 12, 12, 48, 1), dtype=float32)
Most posts about this error say to wrap the operation in a Lambda, but I am not implementing a custom operation here; this should be a Keras TF layer, right?
Also, in my implementation, I want the output tensor from the LSTM unit to have the same shape as the input; can I get some feedback on that as well?
Thank you.
You could use Lambda to wrap the output from K.expand_dims before feeding it to the next layer, like this:
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Lambda
def expand_dims(x):
    # insert a time axis: (batch, 12, 64, 48) -> (batch, 1, 12, 64, 48)
    return K.expand_dims(x, 1)
def expand_dims_output_shape(input_shape):
    return (input_shape[0], 1) + tuple(input_shape[1:])
def LSTM_layer(input_):
    lstm1 = Lambda(expand_dims, expand_dims_output_shape)(input_)
    lstm1 = tf.keras.layers.ConvLSTM2D(filters=12, kernel_size=(3, 3), strides=(1, 1),
                                       data_format="channels_first", padding='same',
                                       return_sequences=False)(lstm1)
    return lstm1
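A quick usage sketch with the questioner's build_model: with return_sequences=False, the ConvLSTM2D consumes the inserted time axis, so the output shape should again be (batch, 12, 64, 48), matching the encoder output as requested.
def build_model(input_shape):
    input_ = tf.keras.Input(shape=input_shape, name="input_node")
    lstm_features = LSTM_layer(input_)
    return tf.keras.Model(inputs=input_, outputs=[lstm_features])
model = build_model((12, 64, 48))
model.summary()  # final output shape: (None, 12, 64, 48)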

Keras: Bug that depends on how many layers in network

I am having trouble with a seemingly arbitrary bug when using Keras. I run into the error "NotFoundError: FeedInputs: unable to find feed output dense_3_target:0" when trying to build a model in Keras. The error seems to depend on the number of layers I put in the network (the bug appears when the number of layers is not equal to 4). Does anyone know what is going on here?
The code and error message:
import tensorflow as tf
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
tf.reset_default_graph()
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist", one_hot=True)
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels
# Hyper Parameters
n_features = 784
n_classes = 10
learning_rate = 0.5
training_epochs = 2
model = Sequential()
model.add(Dense(units = 100, activation = 'relu', input_dim = n_features))
model.add(Dense(units = 50,activation = 'relu'))
model.add(Dense(50,activation = 'relu'))
model.add(Dense(units = n_classes, activation = 'softmax'))
# Step 3: Compile the Model
model.compile(optimizer='adam',loss='categorical_crossentropy')
## Step 4: Train the Model
history = model.fit(X_train,y_train,epochs=10,batch_size = 100,validation_data=(X_test,y_test))
===================================================================
File "<ipython-input-14-1076cda88cc6>", line 43, in <module>
history = model.fit(X_train,y_train,epochs=10,batch_size = 100,validation_data=(X_test,y_test))
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/engine/training.py", line 1037, in fit
validation_steps=validation_steps)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/engine/training_arrays.py", line 199, in fit_loop
outs = f(ins_batch)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2666, in __call__
return self._call(inputs)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2635, in _call
session)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2587, in _make_callable
callable_fn = session._make_callable_from_options(callable_opts)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1480, in _make_callable_from_options
return BaseSession._Callable(self, callable_options)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1441, in __init__
session._session, options_ptr, status)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 519, in __exit__
c_api.TF_GetCode(self.status.status))
NotFoundError: FeedInputs: unable to find feed output dense_3_target:0