How to use tf.py_func in a Keras Lambda layer to wrap Python code. ValueError: The last dimension of the inputs to Dense should be defined. Found None - numpy

I would like to wrap NumPy code using tf.py_function in a custom Lambda layer in Keras.
Note: only for simplicity, I'm showing a simple np.power function.
Here is what I've done:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model

def my_func(x):
    return np.power(x, 2)

def my_lambda_func(x):
    return tf.py_function(my_func, [x], tf.float32)

def model():
    inp = Input(shape=(2,))
    x = Dense(128)(inp)
    x = Dense(128)(x)
    z = Lambda(my_lambda_func)(x)
    output = Dense(1)(z)
    model = Model(inputs=inp, outputs=output)
    return model

model = model()
model.compile(optimizer='adam', loss='mse')
Then I get this error
ValueError Traceback (most recent call last)
in ()
21
22
---> 23 model = model ()
24 model.compile(optimizer='adam', loss='mse')
3 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/core.py in build(self, input_shape)
1179 last_dim = tensor_shape.dimension_value(input_shape[-1])
1180 if last_dim is None:
-> 1181 raise ValueError('The last dimension of the inputs to Dense '
1182 'should be defined. Found None.')
1183 self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
ValueError: The last dimension of the inputs to Dense should be defined. Found None.

You may need to specify the output shape of the Lambda layer's output z: tf.py_function yields a tensor whose static shape is None, which does not sit well with the Dense layer that follows it. You should try:
z = Lambda(my_lambda_func)(x)
z.set_shape(x.shape)
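For completeness, a minimal sketch of the whole fixed model (my assembly of the above; the shape can equally be restored inside the wrapped function, and the builder is renamed build_model here to avoid shadowing the model variable):

def my_lambda_func(x):
    y = tf.py_function(my_func, [x], tf.float32)
    y.set_shape(x.shape)  # py_function drops the static shape; restore it
    return y

def build_model():
    inp = Input(shape=(2,))
    x = Dense(128)(inp)
    x = Dense(128)(x)
    z = Lambda(my_lambda_func)(x)
    output = Dense(1)(z)
    return Model(inputs=inp, outputs=output)

model = build_model()
model.compile(optimizer='adam', loss='mse')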

Related

I cannot unpack non-iterable NoneType object

I am trying to implement a deep dream model using ResNet. Several problems appear. One of them says that the object being unpacked is of type NoneType, which means it has a value of None; in Python, you cannot unpack an object of type NoneType because it is not iterable. Sometimes there is an error saying that the value being converted is of type NoneType and that this type is unsupported, which means TensorFlow cannot create an EagerTensor from a value of None.
This is a piece of code we tried to use to solve the error, but it failed. The second code block is the full code.
def deep_dream(input_image, model, steps=100, step_size=0.01):
    # Define the loss and the optimizer
    loss, intermediate_layer_model = calc_loss(input_image, model)
    optimizer = tf.optimizers.SGD(learning_rate=step_size)
    # Keep a list to hold the evolution of the image
    image_list = []
    # Run the optimization
    for i in range(steps):
        with tf.GradientTape() as tape:
            tape.watch(input_image)
            loss = calc_loss(input_image, model)[0]
        grads, = tape.gradient(loss, input_image)
        grads = tape.gradient(loss, input_image)
        if grads is None:
            return
        optimizer.apply_gradients([(grads, input_image)])
        image_list.append(input_image.numpy().copy())
    # Return the final image
    return input_image
# Load the ResNet50 model
#model = ResNet50(weights='imagenet')
from tensorflow import keras
model = keras.applications.ResNet50(weights='imagenet', include_top=False)

# Iterate over the layers in the ResNet50 model
for layer in model.layers:
    print(f'{layer.name}---> {layer.output_shape}')

import cv2

# Function to calculate the loss
def calc_loss(input_image, model):
    input_image_batch = tf.expand_dims(input_image, axis=0)
    preprocessed_input = preprocess_input(input_image_batch.numpy().copy())
    # Get the activations of a specific layer
    layer_name = "conv5_block2_1_bn"
    intermediate_layer_model = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_output = intermediate_layer_model(preprocessed_input)
    # Define the loss
    loss = tf.math.reduce_mean(intermediate_output)
    return loss, intermediate_layer_model

def deep_dream(input_image, model, steps=100, step_size=0.01):
    for i in range(steps):
        with tf.GradientTape() as tape:
            # Compute the loss
            loss, model_ = calc_loss(input_image, model)
        # Get the gradients of the input image with respect to the loss
        grads = tape.gradient(loss, input_image)
        # Normalize the gradients
        grads /= tf.math.reduce_std(grads) + 1e-8
        # Update the input image
        input_image += grads * step_size
        input_image = tf.clip_by_value(input_image, 0, 255)
    return input_image

# Load an image
#img_path = '/content/drive/MyDrive/Baghdad images/market.png'
#img = cv2.imread(img_path)
#img = cv2.resize(img, (224, 224))
#img = np.array(img, dtype=float)
# Preprocess the image
#original_image = np.copy(img)
#img = preprocess_input(np.expand_dims(img, axis=0))
img_path = cv2.resize(cv2.imread('/content/drive/MyDrive/Baghdad images/market.png'), (224, 224))
#img = image.load_img(img_path, target_size=(224, 224))
img = image.img_to_array(img)

# Preprocess the image
original_image = np.copy(img)
#img = preprocess_input(np.expand_dims(img, axis=0))
#input_image.set_shape([1,224,224,3])
#input_image = tf.constant(img, dtype=tf.float32)
#input_image = tf.expand_dims(input_image, axis=0)
#input_image = tf.squeeze(input_image, axis=0)
# Convert the image to a Tensor
input_image = tf.constant(img, dtype=tf.float32)

# Run the deep dream algorithm
dream_img = deep_dream(input_image, model)

# Deprocess the image
dream_img = tf.clip_by_value(dream_img[0], 0, 255).numpy().astype('uint8')

# Plot the original and dream images
plt.figure(figsize=(10,10))
plt.subplot(121)
plt.imshow(original_image.astype('uint8'))
plt.axis('off')
plt.title('Original Image')
plt.subplot(122)
plt.imshow(dream_img)
plt.axis('off')
plt.title('Dream Image')
plt.show()
I am trying to build a deep dream model with ResNet to generate a deep dream image, but the result is an error.
The first error:
TypeError Traceback (most recent call last)
<ipython-input-95-b14eebcfe038> in <module>
1 # Run the deep dream algorithm
----> 2 dream_img = deep_dream(input_image, model)
<ipython-input-92-27c78a6e0618> in deep_dream(input_image, model, steps, step_size)
12 tape.watch(input_image)
13 loss = calc_loss(input_image, model)[0]
---> 14 grads, = tape.gradient(loss, input_image)
15 grads = tape.gradient(loss, input_image)
16 if grads is None:
TypeError: cannot unpack non-iterable NoneType object
and the second error:
ValueError Traceback (most recent call last)
<ipython-input-48-09c4ef33f56c> in <module>
72
73 # Run the deep dream algorithm
---> 74 dream_img = deep_dream(input_image, model)
75
76 # Deprocess the image
16 frames
/usr/local/lib/python3.8/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
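A likely root cause is visible in calc_loss: input_image_batch.numpy() is called inside the gradient tape, which leaves TensorFlow and severs the graph, so tape.gradient returns None. Unpacking that None (grads, = ...) raises the TypeError, and feeding it onward raises the ValueError about converting None to a tensor. Below is a minimal sketch of a differentiable version, building the feature extractor once and staying in TF ops end to end (my reconstruction under those assumptions, not the poster's final code):

import tensorflow as tf

# Build the feature extractor once, outside the optimization loop
base = tf.keras.applications.ResNet50(weights='imagenet', include_top=False)
dream_model = tf.keras.Model(inputs=base.input,
                             outputs=base.get_layer("conv5_block2_1_bn").output)

def calc_loss(image, feature_model):
    # Stay in TF ops: preprocess_input accepts tensors, so no .numpy() is needed
    batched = tf.expand_dims(image, axis=0)
    preprocessed = tf.keras.applications.resnet50.preprocess_input(batched)
    return tf.reduce_mean(feature_model(preprocessed))

def deep_dream(input_image, feature_model, steps=100, step_size=0.01):
    image = tf.Variable(input_image)  # Variables are watched by the tape automatically
    for _ in range(steps):
        with tf.GradientTape() as tape:
            loss = calc_loss(image, feature_model)  # a single tensor; nothing to unpack
        grads = tape.gradient(loss, image)
        grads /= tf.math.reduce_std(grads) + 1e-8
        image.assign(tf.clip_by_value(image + grads * step_size, 0, 255))
    return image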

Keras AttributeError: 'Functional' object has no attribute 'shape'

Trying to add a DenseNet121 functional block to the model.
I need the Keras model to be written in the functional format, not using the
model = Sequential()
model.add(...)
method.
What's wrong with the function build_img_encod?
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-62-69dd207148e0> in <module>()
----> 1 x = build_img_encod()
3 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
164 spec.min_ndim is not None or
165 spec.max_ndim is not None):
--> 166 if x.shape.ndims is None:
167 raise ValueError('Input ' + str(input_index) + ' of layer ' +
168 layer_name + ' is incompatible with the layer: '
AttributeError: 'Functional' object has no attribute 'shape'
def build_img_encod():
    base_model = DenseNet121(input_shape=(150,150,3),
                             include_top=False,
                             weights='imagenet')
    for layer in base_model.layers:
        layer.trainable = False
    flatten = Flatten(name="flatten")(base_model)
    img_dense_encoder = Dense(1024, activation='relu', name="img_dense_encoder", kernel_regularizer=regularizers.l2(0.0001))(flatten)
    model = keras.models.Model(inputs=base_model, outputs=img_dense_encoder)
    return model
The reason you get that error is that you need to provide the input tensor of the base_model, not the base_model object itself.
Replace this line: model = keras.models.Model(inputs=base_model, outputs = img_dense_encoder)
with: model = keras.models.Model(inputs=base_model.input, outputs = img_dense_encoder)
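Note that the traceback actually points at Flatten, which is likewise being called on the Model object; applying it to base_model.output fixes that in the same way. Putting both changes together, the function might look like this (a sketch, assuming the question's DenseNet121/Flatten/Dense/regularizers imports):

def build_img_encod():
    base_model = DenseNet121(input_shape=(150, 150, 3),
                             include_top=False,
                             weights='imagenet')
    for layer in base_model.layers:
        layer.trainable = False
    # Call layers on tensors (base_model.output), not on the Model object
    flatten = Flatten(name="flatten")(base_model.output)
    img_dense_encoder = Dense(1024, activation='relu',
                              name="img_dense_encoder",
                              kernel_regularizer=regularizers.l2(0.0001))(flatten)
    return keras.models.Model(inputs=base_model.input, outputs=img_dense_encoder)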
def build_img_encod():
    dense = DenseNet121(input_shape=(150,150,3),
                        include_top=False,
                        weights='imagenet')
    for layer in dense.layers:
        layer.trainable = False
    img_input = Input(shape=(150,150,3))
    base_model = dense(img_input)
    flatten = Flatten(name="flatten")(base_model)
    img_dense_encoder = Dense(1024, activation='relu', name="img_dense_encoder", kernel_regularizer=regularizers.l2(0.0001))(flatten)
    model = keras.models.Model(inputs=img_input, outputs=img_dense_encoder)
    return model
This worked.
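This version works because a Keras Model is itself callable like a layer: dense(img_input) returns a proper symbolic tensor with a known shape, so the downstream Flatten and Dense layers can build. A minimal illustration of the same pattern (a sketch; the printed shape is an assumption for a 150x150 input):

inner = DenseNet121(input_shape=(150, 150, 3), include_top=False, weights='imagenet')
x_in = Input(shape=(150, 150, 3))
features = inner(x_in)   # a Model used as a layer yields a symbolic tensor
print(features.shape)    # e.g. (None, 4, 4, 1024), so Flatten can build on it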

I'm getting "Tensor.op is meaningless when eager execution is enabled." in my simple encoder model. (TF 2.0)

The code of my encoder model is given below; I made it using the functional API (TF 2.0).
embed_obj = EndTokenLayer()

def encoder_model(inp):
    input_1 = embed_obj(inp)
    h = Masking([(lambda x: x*0)(x) for x in range(128)])(input_1)
    lstm1, state_h, state_c = LSTM(512, return_sequences=True, return_state=True)(h)
    model = Model(inputs=input_1, outputs=[lstm1, state_h, state_c])
    return model
And when I'm calling my model:
for x, y in train.take(1):
    k = x
model = encoder_model(k)
I'm getting the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-98-46e9c9596137> in <module>()
2 for x,y in train.take(1):
3 k = x
----> 4 model = encoder_model(k)
7 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in op(self)
1111 def op(self):
1112 raise AttributeError(
-> 1113 "Tensor.op is meaningless when eager execution is enabled.")
1114
1115 @property
AttributeError: Tensor.op is meaningless when eager execution is enabled.
In TF2, a static graph (preventing eager execution) can be constructed by using a decorator.
Try the @tf.function decorator:
@tf.function
def encoder_model(inp):
    input_1 = embed_obj(inp)
    h = Masking([(lambda x: x*0)(x) for x in range(128)])(input_1)
    lstm1, state_h, state_c = LSTM(512, return_sequences=True, return_state=True)(h)
    model = Model(inputs=input_1, outputs=[lstm1, state_h, state_c])
    return model
Then call the function:
for x, y in train.take(1):
    k = x
model = encoder_model(k)
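An alternative worth sketching (my own, not part of the answer above): the root of the problem is that Model(inputs=input_1, ...) receives a concrete eager tensor rather than a symbolic one, so you can build the model once from an Input placeholder and then call it on the batch. The sequence length of 128 is taken from the Masking call; the mask value 0.0 is an assumption:

from tensorflow.keras.layers import Input, Masking, LSTM
from tensorflow.keras.models import Model

def build_encoder(timesteps=128):
    inp = Input(shape=(timesteps,))  # symbolic placeholder, not a data batch
    x = embed_obj(inp)               # EndTokenLayer instance from the question
    h = Masking(mask_value=0.0)(x)   # assumption: 0.0 marks padded positions
    lstm1, state_h, state_c = LSTM(512, return_sequences=True, return_state=True)(h)
    return Model(inputs=inp, outputs=[lstm1, state_h, state_c])

encoder = build_encoder()
for x, y in train.take(1):
    lstm1, state_h, state_c = encoder(x)  # call the built model on eager data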

Custom Attention Layer in Keras

I want to create a custom attention layer such that, for the input at any time step, the layer returns the weighted mean of the inputs at all time steps.
For example, I want an input tensor with shape [32,100,2048] to go into the layer and get a tensor with shape [32,100,2048] back. I wrote the layer as follows:
import tensorflow as tf
from keras.layers import Layer, Dense
# or
from tensorflow.keras.layers import Layer, Dense

class Attention(Layer):
    def __init__(self, units_att):
        self.units_att = units_att
        self.W = Dense(units_att)
        self.V = Dense(1)
        super().__init__()

    def __call__(self, values):
        t = tf.constant(0, dtype=tf.int32)
        time_steps = tf.shape(values)[1]
        initial_outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
        initial_att = tf.TensorArray(dtype=tf.float32, size=time_steps)

        def should_continue(t, *args):
            return t < time_steps

        def iteration(t, values, outputs, atts):
            score = self.V(tf.nn.tanh(self.W(values)))
            # attention_weights shape == (batch_size, time_step, 1)
            attention_weights = tf.nn.softmax(score, axis=1)
            # context_vector shape after sum == (batch_size, hidden_size)
            context_vector = attention_weights * values
            context_vector = tf.reduce_sum(context_vector, axis=1)
            outputs = outputs.write(t, context_vector)
            atts = atts.write(t, attention_weights)
            return t + 1, values, outputs, atts

        t, values, outputs, atts = tf.while_loop(should_continue, iteration,
                                                 [t, values, initial_outputs, initial_att])
        outputs = outputs.stack()
        outputs = tf.transpose(outputs, [1, 0, 2])
        atts = atts.stack()
        atts = tf.squeeze(atts, -1)
        atts = tf.transpose(atts, [1, 0, 2])
        return t, values, outputs, atts
For input = tf.constant(2, shape=[32, 100, 2048], dtype=tf.float32) I get output with shape [32,100,2048] in TF2 and [32,None,2048] in TF1.
For input = Input(shape=(None, 2048)) I get output with shape [None,None,2048] in TF1, and in TF2 I get the error
TypeError: 'Tensor' object cannot be interpreted as an integer
Finally, in both cases, I can't use this layer in my model, because my model input is Input(shape=(None, 2048)) and I get the error
AttributeError: 'NoneType' object has no attribute '_inbound_nodes'
in TF1; in TF2 I get the same error as above. I create my model with the Keras functional method.
From the code you have shared, it looks like you want to implement Bahdanau's attention layer in your code. You want to attend to all the 'values' (the previous layer's output, i.e. all its hidden states), and your 'query' would be the last hidden state of the decoder. Your code should actually be very simple and should look like:
class Bahdanau(tf.keras.layers.Layer):
    def __init__(self, n):
        super(Bahdanau, self).__init__()
        self.w = tf.keras.layers.Dense(n)
        self.u = tf.keras.layers.Dense(n)
        self.v = tf.keras.layers.Dense(1)

    def call(self, query, values):
        query = tf.expand_dims(query, 1)
        e = self.v(tf.nn.tanh(self.w(query) + self.u(values)))
        a = tf.nn.softmax(e, axis=1)
        c = a * values
        c = tf.reduce_sum(c, axis=1)
        return a, c

## Say we want 10 units in the single-layer MLP determining w, u
attentionlayer = Bahdanau(10)
## Call with i/p: decoder state @ t-1 and all encoder hidden states
a, c = attentionlayer(stminus1, hj)
We are not specifying the tensor shape anywhere in the code. This code will return a context tensor of the same size as 'stminus1', which is the 'query'. It does this after attending to all the 'values' (all hidden states of the encoder) using Bahdanau's attention mechanism.
So assuming your batch size is 32, timesteps = 100 and embedding dimension = 2048, the shape of stminus1 should be (32,2048) and the shape of hj should be (32,100,2048). The shape of the output context would be (32,2048). We also return the 100 attention weights in case you want to route them to a nice display.
This is the simplest version of 'Attention'. If you have any other intent, please let me know and I will reformat my answer. For more specific details, please refer to https://towardsdatascience.com/create-your-own-custom-attention-layer-understand-all-flavours-2201b5e8be9e
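As a quick sanity check on the shapes described above, a runnable sketch with random tensors standing in for the real decoder state and encoder outputs:

import tensorflow as tf

attentionlayer = Bahdanau(10)
stminus1 = tf.random.normal((32, 2048))   # decoder state at t-1 (the 'query')
hj = tf.random.normal((32, 100, 2048))    # all encoder hidden states (the 'values')
a, c = attentionlayer(stminus1, hj)
print(a.shape)  # (32, 100, 1): one attention weight per timestep
print(c.shape)  # (32, 2048): context vector, same size as the query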

Finetuning DNN with continuous outputs in the last layer

I'd greatly appreciate it if someone could help me out here:
I'm trying to do some fine-tuning on a regression task, on top of the InceptionV3 architecture. My inputs are 200x200 RGB images and my prediction output/label is a set of real values (let's say within [0,10], though scaling is not a big deal here). Here are my functions that take a pretrained Inception model, remove the last layer, add a new layer, and set everything up for fine-tuning...
"""
Fine-tuning functions
"""
IM_WIDTH, IM_HEIGHT = 299, 299 #fixed size for InceptionV3
NB_EPOCHS = 3
BAT_SIZE = 32
FC_SIZE = 1024
NB_IV3_LAYERS_TO_FREEZE = 172
def eucl_dist(inputs):
x, y = inputs
return ((x - y)**2).sum(axis=-1)
def add_new_last_continuous_layer(base_model):
"""Add last layer to the convnet
Args:
base_model: keras model excluding top, for instance:
base_model = InceptionV3(weights='imagenet',include_top=False)
Returns:
new keras model with last layer
"""
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu')(x)
predictions = Lambda(eucl_dist, output_shape=(1,))(x)
model = Model(input=base_model.input, output=predictions)
return model
def setup_to_finetune_continuous(model):
"""Freeze the bottom NB_IV3_LAYERS and retrain the remaining top
layers.
note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in
the inceptionv3 architecture
Args:
model: keras model
"""
for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
layer.trainable = False
for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
layer.trainable = True
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
loss='eucl_dist')
Here are my implementations:
base_model = InceptionV3(weights="imagenet",
                         include_top=False, input_shape=(3, 200, 200))
model0 = add_new_last_continuous_layer(base_model)
setup_to_finetune_continuous(model0)
history = model0.fit(train_x, train_y, validation_data=(test_x, test_y), nb_epoch=epochs, batch_size=32)
scores = model0.evaluate(test_x, test_y, verbose=0)
features = model0.predict(X_train)
where train_x is a (168435, 3, 200, 200) numpy array and train_y is a (168435,) numpy array. The same goes for test_x and test_y except the number of observations is 42509.
I got the TypeError: 'Tensor' object is not iterable bug, which occurred at predictions = Lambda(eucl_dist, output_shape=(1,))(x) when going through the add_new_last_continuous_layer() function. Could anyone kindly give me some guidance on what the problem is and how to get around it? Greatly appreciated, and happy holidays!
EDIT:
Changed the functions to:
def eucl_dist(inputs):
    x, y = inputs
    return ((x - y)**2).sum(axis=-1)

def add_new_last_continuous_layer(base_model):
    """Add last layer to the convnet
    Args:
        base_model: keras model excluding top, for instance:
            base_model = InceptionV3(weights='imagenet', include_top=False)
    Returns:
        new keras model with last layer
    """
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x1 = Dense(FC_SIZE, activation='relu')(x)
    x2 = Dense(FC_SIZE, activation='relu')(x)
    predictions = Lambda(eucl_dist, output_shape=eucl_dist_shape)([x1, x2])
    model = Model(input=base_model.input, output=predictions)
    return model
Your output shape for the lambda layer is wrong. Define your functions like this:
from keras import backend as K

def euclidean_distance(vects):
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))

def eucl_dist_output_shape(shapes):
    shape1, shape2 = shapes
    return (shape1[0], 1)

predictions = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([input1, input2])
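Wired into the EDIT's function, the last layers would then look something like this (a sketch; FC_SIZE and the two Dense branches come from the question, and I use the current inputs=/outputs= keyword names rather than the older input=/output=):

def add_new_last_continuous_layer(base_model):
    x = GlobalAveragePooling2D()(base_model.output)
    x1 = Dense(FC_SIZE, activation='relu')(x)
    x2 = Dense(FC_SIZE, activation='relu')(x)
    # Lambda receives a list of two tensors, matching euclidean_distance's unpacking
    predictions = Lambda(euclidean_distance,
                         output_shape=eucl_dist_output_shape)([x1, x2])
    return Model(inputs=base_model.input, outputs=predictions)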