AttributeError: The layer has never been called and thus has no defined output shape - tensorflow

I am trying to define a model, happyModel():
# GRADED FUNCTION: happyModel
def happyModel():
    """
    Implements the forward propagation for the binary classification model:
    ZEROPAD2D -> CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> FLATTEN -> DENSE

    Note that for simplicity and grading purposes, you'll hard-code all the values
    such as the stride and kernel (filter) sizes.
    Normally, functions should take these values as function parameters.

    Arguments:
    None

    Returns:
    model -- TF Keras model (object containing the information for the entire training process)
    """
    model = tf.keras.Sequential(
        [
            ## ZeroPadding2D with padding 3, input shape of 64 x 64 x 3
            tf.keras.layers.ZeroPadding2D(padding=(3, 3), data_format=(64, 64, 3)),
            ## Conv2D with 32 7x7 filters and stride of 1
            tf.keras.layers.Conv2D(32, (7, 7), strides=(1, 1), name='conv0'),
            ## BatchNormalization for axis 3
            tf.keras.layers.BatchNormalization(axis=3, name='bn0'),
            ## ReLU
            tf.keras.layers.Activation('relu'),
            ## Max Pooling 2D with default parameters
            tf.keras.layers.MaxPooling2D((2, 2), name='max_pool0'),
            ## Flatten layer
            tf.keras.layers.Flatten(),
            ## Dense layer with 1 unit for output & 'sigmoid' activation
            tf.keras.layers.Dense(1, activation='sigmoid', name='fc'),
            # YOUR CODE STARTS HERE
            # YOUR CODE ENDS HERE
        ]
    )
    return model
The following code creates an instance of the model defined above and compares its layer summary against the expected output:
happy_model = happyModel()
# Print a summary for each layer
for layer in summary(happy_model):
    print(layer)

output = [['ZeroPadding2D', (None, 70, 70, 3), 0, ((3, 3), (3, 3))],
          ['Conv2D', (None, 64, 64, 32), 4736, 'valid', 'linear', 'GlorotUniform'],
          ['BatchNormalization', (None, 64, 64, 32), 128],
          ['ReLU', (None, 64, 64, 32), 0],
          ['MaxPooling2D', (None, 32, 32, 32), 0, (2, 2), (2, 2), 'valid'],
          ['Flatten', (None, 32768), 0],
          ['Dense', (None, 1), 32769, 'sigmoid']]

comparator(summary(happy_model), output)
I got the following error:
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-67-f33284fd82fe> in <module>
      1 happy_model = happyModel()
      2 # Print a summary for each layer
----> 3 for layer in summary(happy_model):
      4     print(layer)
      5
~/work/release/W1A2/test_utils.py in summary(model)
     30     result = []
     31     for layer in model.layers:
---> 32         descriptors = [layer.__class__.__name__, layer.output_shape, layer.count_params()]
     33         if (type(layer) == Conv2D):
     34             descriptors.append(layer.padding)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in output_shape(self)
   2177     """
   2178     if not self._inbound_nodes:
-> 2179       raise AttributeError('The layer has never been called '
   2180                            'and thus has no defined output shape.')
   2181     all_output_shapes = set(
AttributeError: The layer has never been called and thus has no defined output shape.
I suspect my call to ZeroPadding2D() is not right. The project seems to require the input shape of ZeroPadding2D() to be 64x64x3. I tried many formats but could not fix the problem. Can anyone give me a pointer? Thanks a lot.

In your model definition, there's an issue with the following layer:
tf.keras.layers.ZeroPadding2D(padding=(3,3), data_format=(64,64,3)),
First, you didn't define any input shape. Also, data_format is a string, one of "channels_last" (default) or "channels_first" (source). The correct way to define the above model is as follows:
def happyModel():
    model = tf.keras.Sequential(
        [
            ## ZeroPadding2D with padding 3, input shape of 64 x 64 x 3
            tf.keras.layers.ZeroPadding2D(padding=(3, 3),
                                          input_shape=(64, 64, 3),
                                          data_format="channels_last"),
            ....
            ....
happy_model = happyModel()
happy_model.summary()
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
zero_padding2d_4 (ZeroPaddin (None, 70, 70, 3) 0
_________________________________________________________________
conv0 (Conv2D) (None, 64, 64, 32) 4736
_________________________________________________________________
bn0 (BatchNormalization) (None, 64, 64, 32) 128
_________________________________________________________________
activation_2 (Activation) (None, 64, 64, 32) 0
_________________________________________________________________
max_pool0 (MaxPooling2D) (None, 32, 32, 32) 0
_________________________________________________________________
flatten_16 (Flatten) (None, 32768) 0
_________________________________________________________________
fc (Dense) (None, 1) 32769
=================================================================
Total params: 37,633
Trainable params: 37,569
Non-trainable params: 64
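As a sanity check on those numbers (my arithmetic, not part of the original answer): conv0 has 7*7*3*32 + 32 = 4,736 parameters, bn0 has 4*32 = 128 per-channel parameters of which the moving mean and variance (2*32 = 64) are non-trainable, and fc has 32768*1 + 1 = 32,769, which adds up to the 37,633 total.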

Per the documentation for tf.keras.Sequential() (https://www.tensorflow.org/api_docs/python/tf/keras/Sequential):
"Optionally, the first layer can receive an input_shape argument"
So instead of
tf.keras.layers.ZeroPadding2D(padding=(3,3), data_format=(64,64,3))
if you want to specify the input shape, it should be
tf.keras.layers.ZeroPadding2D(padding=(3,3), input_shape=(64,64,3))
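To sanity-check that fix, here is a minimal sketch (my addition, assuming TF 2.x): once the first layer has an input_shape, the Sequential model is built immediately and every layer gets a defined output shape, so the summary() helper no longer raises.

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.ZeroPadding2D(padding=(3, 3), input_shape=(64, 64, 3)),
])
# With input_shape given, the model is built on construction; padding of 3
# on each side turns 64x64 into 70x70.
print(model.layers[0].output_shape)  # (None, 70, 70, 3)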

model = tf.keras.Sequential([
    # YOUR CODE STARTS HERE
    tf.keras.layers.ZeroPadding2D(padding=(3, 3), input_shape=(64, 64, 3), data_format="channels_last"),
    tf.keras.layers.Conv2D(32, (7, 7), strides=(1, 1)),
    tf.keras.layers.BatchNormalization(axis=3),
    tf.keras.layers.ReLU(),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1, activation='sigmoid'),
    # YOUR CODE ENDS HERE
])
return model
Try it; it works perfectly.

model = tf.keras.Sequential(
    [
        ## ZeroPadding2D with padding 3, input shape of 64 x 64 x 3
        ## Conv2D with 32 7x7 filters and stride of 1
        ## BatchNormalization for axis 3
        ## ReLU
        ## Max Pooling 2D with default parameters
        ## Flatten layer
        ## Dense layer with 1 unit for output & 'sigmoid' activation
        # YOUR CODE STARTS HERE
        tfl.ZeroPadding2D(padding=(3, 3), input_shape=(64, 64, 3), data_format="channels_last"),
        tfl.Conv2D(32, (7, 7), strides=(1, 1), name='conv0'),
        tfl.BatchNormalization(axis=3, name='bn0'),
        tfl.ReLU(),
        tfl.MaxPooling2D((2, 2), name='max_pool0'),
        tfl.Flatten(),
        tfl.Dense(1, activation='sigmoid', name='fc'),
        # YOUR CODE ENDS HERE
    ])
This works; you can try it (here tfl is the tensorflow.keras.layers alias used in the assignment notebook).
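If you'd rather not hard-code input_shape into the first layer, another option (my suggestion, not from the answers above) is to drop the invalid data_format argument and build the Sequential model explicitly before inspecting it:

# Hypothetical alternative: build the unbuilt model with an explicit
# batch input shape so every layer gets a defined output_shape.
happy_model = happyModel()
happy_model.build(input_shape=(None, 64, 64, 3))  # None is the batch dimension
happy_model.summary()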

Related

Error on custom dataset dimensions feeding transfer model in TensorFlow

Can someone explain this TensorFlow error for me? I'm having trouble understanding what I am doing wrong.
I have a dataset in TensorFlow constructed with a generator. When I test the output of the generator, the output dimensions look correct (224 x 224 x 1). But when I try to train the model, I get an error:
WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 1) for input
KerasTensor(type_spec=TensorSpec(shape=(None, 224, 224, 1), dtype=tf.float32,
name='input_2'), name='input_2', description="created by layer 'input_2'"),
but it was called on an input with incompatible shape (224, 224, 1, 1).
I'm unsure why the dimension of this output has an extra 1 at the end.
Here is the code to create the generator and model. df is a dataframe with file paths to data and labels. The data are 2D matrices of variable dimensions. I'm using cv2.resize to make them 224x224 and then np.reshape to transform the dimensions to (224, 224, 1). Then I yield the result.
def datagen_row():
    # ======================== #
    # Import data
    # ======================== #
    df = get_data()
    rowsize = 224
    colsize = 224
    # ======================== #
    #
    # ======================== #
    for row in range(len(df)):
        data = get_data_from_filepath(df.iloc[row].file_path)
        data = cv2.resize(data, dsize=(rowsize, colsize), interpolation=cv2.INTER_CUBIC)
        labels = df.iloc[row].label
        data = data.reshape(224, 224, 1)
        yield data, labels

dataset = tf.data.Dataset.from_generator(
    datagen_row,
    output_signature=(
        tf.TensorSpec(shape=(int(os.getenv('rowsize')), int(os.getenv('colsize')), 1), dtype=tf.float32, name=None),
        tf.TensorSpec(shape=(), dtype=tf.int64, name=None)
    )
)
Testing the following, I get what I expected:
iterator = iter(dataset.batch(8))
x = iterator.get_next()
x[0].shape # TensorShape([8, 224, 224, 1])
x[1].shape # TensorShape([8])
x[0] # <tf.Tensor: shape=(8, 224, 224, 1), dtype=float32, numpy=array(...
x[1] # <tf.Tensor: shape=(8,), dtype=int64, numpy=array([1, 1, 1, 1, 1, 1, 1, 1], dtype=int64)>
I'm trying to plug this into the InceptionV3 model to do classification:
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.layers import Input
from tensorflow.keras import layers

origModel = InceptionV3(weights='imagenet', include_top=False)
inputs = layers.Input(shape=(224, 224, 1))
modified_inputs = layers.Conv2D(3, 3, padding='same', activation='relu')(inputs)
x = origModel(modified_inputs)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dense(64, activation='relu')(x)
x = layers.Dense(32, activation='relu')(x)
outputs = layers.Dense(2)(x)
model = tf.keras.Model(inputs, outputs)
model.summary()  # 24.6 M trainable params

for layer in origModel.layers:
    layer.trainable = False

model.summary()  # now shows 2.8 M trainable params

model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy']
)
model.fit(dataset, epochs=1, verbose=True, batch_size=32)
Here is the output of model.summary:
model.summary()
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 224, 224, 1)] 0
conv2d_94 (Conv2D) (None, 224, 224, 3) 30
inception_v3 (Functional) (None, None, None, 2048) 21802784
global_average_pooling2d (G (None, 2048) 0
lobalAveragePooling2D)
dense (Dense) (None, 1024) 2098176
dense_1 (Dense) (None, 512) 524800
dense_2 (Dense) (None, 256) 131328
dense_3 (Dense) (None, 128) 32896
dense_4 (Dense) (None, 64) 8256
dense_5 (Dense) (None, 32) 2080
dense_6 (Dense) (None, 2) 66
=================================================================
Total params: 24,600,416
Trainable params: 2,797,632
Non-trainable params: 21,802,784
_________________________________________________________________
This code worked after changing
model.fit(dataset, epochs = 1, verbose = True, batch_size = 32)
to
model.fit(dataset.batch(2), epochs = 1, verbose = True, batch_size = 32)
So... I will have to look into using dataset.batch versus batch_size in model.fit
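The underlying rule (a general tf.keras behavior, not specific to this model): when the x passed to model.fit is a tf.data.Dataset, the batch_size argument is ignored and the dataset is expected to already yield batches, so an unbatched dataset hands single samples straight to the model. A minimal sketch of the usual pattern:

# batch_size in model.fit is ignored for datasets; batch explicitly instead
train_ds = dataset.batch(32).prefetch(tf.data.AUTOTUNE)
model.fit(train_ds, epochs=1, verbose=True)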

Stacking ensemble with two different inputs for image segmentation

I am stacking two models trained on different inputs from two data collections, as shown below, using TensorFlow Keras 2.6.2. The stacking is performed with a convolutional meta-learner to predict on a common held-out test set. Given below are the code and the model architecture.
#load data
#datase-1
X_tr1 = np.load('data/X_tr1.npy') #shape (200, 224,224,3)
Y_tr1 = np.load('data/Y_tr1.npy') #shape (200, 224,224,1)
X_val1 = np.load('data/X_val1.npy') #shape (100, 224,224,3)
Y_val1 = np.load('data/Y_val1.npy') #shape (100, 224,224,1)
#dataset-2
X_tr2 = np.load('data/X_tr2.npy') #shape (200, 224,224,3)
Y_tr2 = np.load('data/Y_tr2.npy') #shape (200, 224,224,1)
X_val2 = np.load('data/X_val2.npy') #shape (100, 224,224,3)
Y_val2 = np.load('data/Y_val2.npy') #shape (100, 224,224,1)
#common hold-out test set
X_ts = np.load('data/X_ts.npy') #shape (50, 224,224,3)
Y_ts = np.load('data/Y_ts.npy') #shape (50, 224,224,1)
#%%
#instantiate the models
img_width, img_height = 224,224
input_shape = (img_width, img_height, 3) #RGB inputs
model_input1 = Input(shape=input_shape) #input to model1
model_input2 = Input(shape=input_shape) #input to model2
n_classes=1 #grayscale mask output
activation='sigmoid'
batch_size = 8
n_epochs = 256
BACKBONE = 'vgg16'
# define model
model1 = sm.Unet(BACKBONE, encoder_weights='imagenet',
                 classes=n_classes, activation=activation)
model2 = sm.Unet(BACKBONE, encoder_weights='imagenet',
                 classes=n_classes, activation=activation)
#%%
# constructing a stacking ensemble of the two models
# A second-level fully-convolutional meta-learner is used to learn
# the features extracted from the penultimate layers of the models
n_models = 2
def load_all_models(n_models):
    all_models = list()
    model1.load_weights('weights/vgg16_1.hdf5')  # path to model1
    model_loss1a = Model(inputs=model1.input,
                         outputs=model1.get_layer('decoder_stage4b_relu').output)  # name of the penultimate layer
    x1 = model_loss1a.output
    model1a = Model(inputs=model1.input, outputs=x1, name='model1')
    all_models.append(model1a)
    model2.load_weights('weights/vgg16_2.hdf5')  # path to model2
    model_loss2a = Model(inputs=model2.input,
                         outputs=model2.get_layer('decoder_stage4b_relu').output)
    x2 = model_loss2a.output
    model2a = Model(inputs=model2.input, outputs=x2, name='model2')
    all_models.append(model2a)
    return all_models

# load models
n_members = 2
members = load_all_models(n_members)
print('Loaded %d models' % len(members))

def define_stacked_model(members):
    # update all layers in all models to not be trainable
    for i in range(len(members)):
        model = members[i]
        for layer in model.layers[1:]:
            # make not trainable
            layer.trainable = False
            layer._name = 'ensemble_' + str(i+1) + '_' + layer.name
    ensemble_outputs = [model(model_input1, model_input2) for model in members]
    merge = Concatenate()(ensemble_outputs)
    # meta-learner, fully-convolutional
    x4 = Conv2D(128, (3, 3), activation='relu',
                name='NewConv1', padding='same')(merge)
    x5 = Conv2D(1, (1, 1), activation='sigmoid',
                name='NewConvfinal')(x4)
    model = Model(inputs=[model_input1, model_input2],
                  outputs=x5)
    return model
print("Creating Ensemble")
ensemble = define_stacked_model(members)
print("Ensemble architecture: ")
print(ensemble.summary())
Shown below is the architecture of the stacked model:
Model: "model_4"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 224, 224, 3) 0
__________________________________________________________________________________________________
input_2 (InputLayer) [(None, 224, 224, 3) 0
__________________________________________________________________________________________________
model1 (Functional) (None, None, None, 1 23752128 input_1[0][0]
input_2[0][0]
__________________________________________________________________________________________________
model2 (Functional) (None, None, None, 1 23752128 input_1[0][0]
input_2[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 224, 224, 32) 0 model1[0][0]
model2[0][0]
__________________________________________________________________________________________________
NewConv1 (Conv2D) (None, 224, 224, 128 36992 concatenate[0][0]
__________________________________________________________________________________________________
NewConv2 (Conv2D) (None, 224, 224, 64) 73792 NewConv1[0][0]
__________________________________________________________________________________________________
NewConv3 (Conv2D) (None, 224, 224, 32) 18464 NewConv2[0][0]
__________________________________________________________________________________________________
NewConvfinal (Conv2D) (None, 224, 224, 1) 33 NewConv3[0][0]
==================================================================================================
Total params: 47,633,537
Trainable params: 129,281
Non-trainable params: 47,504,256
I compile and train the model as shown below:
opt = keras.optimizers.Adam(lr=0.001)
loss_func = 'binary_crossentropy'
ensemble.compile(optimizer=opt,
                 loss=loss_func,
                 metrics=['binary_accuracy'])
results_ensemble = ensemble.fit((X_tr1, Y_tr1, X_tr2, Y_tr2),
                                batch_size=batch_size,
                                epochs=n_epochs,
                                verbose=1,
                                validation_data=(X_val1, Y_val1, X_val2, Y_val2))
I get the following error:
Traceback (most recent call last):
File "/home/codes/untitled5.py", line 563, in <module>
validation_data=(X_val1, Y_val1, X_val2, Y_val2))
File "/home/anaconda3/envs/tf262/lib/python3.7/site-packages/keras/engine/training.py", line 1125, in fit
data_adapter.unpack_x_y_sample_weight(validation_data))
File "/home/anaconda3/envs/tf262/lib/python3.7/site-packages/keras/engine/data_adapter.py", line 1574, in unpack_x_y_sample_weight
raise ValueError(error_msg)
ValueError: Data is expected to be in format `x`, `(x,)`, `(x, y)`, or `(x, y, sample_weight)`, found: (array([[[[0.09803922, 0.09803922, 0.09803922],
[0.09803922, 0.09803922, 0.09803922],
[0.09803922, 0.09803922, 0.09803922],
...,
[0.08627451, 0.08627451, 0.08627451],
[0.08627451, 0.08627451, 0.08627451],
[0.05098039, 0.05098039, 0.05098039]],...
Also, how do I predict with a single X_ts, given that the ensemble model now has two separate inputs?
New error after trying to implement the suggestions:
File "/home/codes/untitled5.py", line 595, in <module>
validation_data=outputs)
File "/home/anaconda3/envs/tf262/lib/python3.7/site-packages/keras/engine/training.py", line 1184, in fit
tmp_logs = self.train_function(iterator)
ValueError: Layer model_4 expects 2 input(s), but it received 4 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, 224, 224, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, 224, 224, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:2' shape=(None, 224, 224, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:3' shape=(None, 224, 224, 1) dtype=float32>]
Answer based on the comments. Multiple inputs need to be passed as a list, not a tuple (Keras unpacks a tuple as (x, y, sample_weight)). Also, only the model's two image inputs belong in x; passing the label arrays alongside them is exactly what triggers the follow-up error "Layer model_4 expects 2 input(s), but it received 4 input tensors".
Change:
results_ensemble = ensemble.fit((X_tr1, Y_tr1, X_tr2, Y_tr2),
                                batch_size=batch_size,
                                epochs=n_epochs,
                                verbose=1,
                                validation_data=(X_val1, Y_val1, X_val2, Y_val2))
To (y_train and y_val below are placeholders for whatever single target array the ensemble is trained against):
x_train = [X_tr1, X_tr2]  # the two image inputs, as a list
results_ensemble = ensemble.fit(x_train, y_train,
                                batch_size=batch_size,
                                epochs=n_epochs,
                                verbose=1,
                                validation_data=([X_val1, X_val2], y_val))

# Predicting with the single hold-out set: feed the same array to both inputs
test_inputs_same = [X_ts, X_ts]          # same input twice
preds_same = ensemble.predict(test_inputs_same)
# test_inputs_diff = [x_test1, x_test2]  # or two different inputs
# preds_diff = ensemble.predict(test_inputs_diff)

Sentence classification: Why does my embedding not reduce the shape of the subsequent layer?

I want to embed sentences that all contain 5 words, and my training set has a total vocabulary of 10000 words. I use this code:
import tensorflow as tf

vocab_size = 10000
inputs = tf.keras.layers.Input(shape=(5, vocab_size), name="input")
embedding = tf.keras.layers.Embedding(10000, 64)(inputs)
conv2d_1 = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3),
                                  strides=(1, 1), padding='SAME')(embedding)
model = tf.keras.models.Model(inputs=inputs, outputs=conv2d_1)
model.summary()
After running I get:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) [(None, 5, 10000)] 0
_________________________________________________________________
embedding_105 (Embedding) (None, 5, 10000, 64) 640000
_________________________________________________________________
conv2d_102 (Conv2D) (None, 5, 10000, 32) 18464
=================================================================
I want the embedding to convert the sparse 10000x5 tensor to a dense 64x5 tensor. Apparently that doesn't work as intended, so my question is: why is the shape of the next layer not (None, 5, 64, 32) instead of (None, 5, 10000, 32)? How can I achieve this compaction?
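No answer to this question is included in this excerpt, but for context, here is a minimal sketch of the usual fix (my assumption, not from the thread): Embedding looks up one 64-dimensional vector per integer token id and leaves all other axes alone, so a one-hot input of shape (5, 10000) becomes (5, 10000, 64). Feeding the 5 word indices directly yields the compact (5, 64) representation:

import tensorflow as tf

vocab_size = 10000
# The input is the 5 integer word ids, not one-hot vectors
inputs = tf.keras.layers.Input(shape=(5,), dtype="int32", name="input")
embedding = tf.keras.layers.Embedding(vocab_size, 64)(inputs)  # (None, 5, 64)
# A (timesteps, features) tensor suits Conv1D rather than Conv2D
conv1d_1 = tf.keras.layers.Conv1D(filters=32, kernel_size=3, padding="same")(embedding)
model = tf.keras.models.Model(inputs=inputs, outputs=conv1d_1)
model.summary()  # embedding: (None, 5, 64); conv1d: (None, 5, 32)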

How is it possible to encode an input with one 2D convolution and apply the opposite 2D DeConv / transposed conv to get the same dimensions back?

I am working on an autoencoder, and I have an issue with reproducing the input in the same size. If I use a transposed convolution / deconvolution operation with the same parameters, I get an output size different from the original input. To illustrate my problem, let us assume our model consists of just one convolution (to encode the input) and one deconvolution (to decode the encoded input). However, I do not get the same size as my input: the second and third dimensions (axes 1 and 2) are 16 and not, as one would expect, 15. Here is the code:
import tensorflow as tf

input = tf.keras.Input(shape=(15, 15, 3), name="Input0")
conv2d_layer2 = tf.keras.layers.Conv2D(filters=32, strides=[2, 2], kernel_size=[3, 3],
                                       padding='same',
                                       activation='selu', name="Conv1")
conv2d_trans_layer2 = tf.keras.layers.Conv2DTranspose(filters=32, strides=[2, 2],
                                                      kernel_size=[3, 3], padding='same',
                                                      activation='selu', name="DeConv1")
x_endcoded_1 = conv2d_layer2(input)
x_reconstructed = conv2d_trans_layer2(x_endcoded_1)
model = tf.keras.Model(inputs=input, outputs=x_reconstructed)
Results in the following model:
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
Input0 (InputLayer) [(None, 15, 15, 3)] 0
_________________________________________________________________
Conv1 (Conv2D) (None, 8, 8, 32) 896
_________________________________________________________________
DeConv1 (Conv2DTranspose) (None, 16, 16, 32) 9248
=================================================================
Total params: 10,144
Trainable params: 10,144
How can I reproduce my original input using just this transposed convolution? Is this possible?
By deleting the padding from both layers you can reproduce the mapping:
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose
from tensorflow.keras.models import Model

input = Input(shape=(15, 15, 3), name="Input0")
conv2d_layer2 = Conv2D(filters=32, strides=[2, 2], kernel_size=[3, 3],
                       activation='selu', name="Conv1")(input)
conv2d_trans_layer2 = Conv2DTranspose(filters=32, strides=[2, 2],
                                      kernel_size=[3, 3],
                                      activation='selu', name="DeConv1")(conv2d_layer2)
model = Model(inputs=input, outputs=conv2d_trans_layer2)
model.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
Input0 (InputLayer) [(None, 15, 15, 3)] 0
_________________________________________________________________
Conv1 (Conv2D) (None, 7, 7, 32) 896
_________________________________________________________________
DeConv1 (Conv2DTranspose) (None, 15, 15, 32) 9248
=================================================================
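Why the shapes work out (a short derivation I've added; this is the standard Keras shape arithmetic): with padding='valid', a strided convolution maps n to floor((n - k) / s) + 1 = floor((15 - 3) / 2) + 1 = 7, and the matching transposed convolution maps back to (n - 1) * s + k = (7 - 1) * 2 + 3 = 15. With padding='same', the downsampling rule is ceil(n / s) = ceil(15 / 2) = 8 but the upsampling rule is n * s = 8 * 2 = 16, which is exactly where the extra pixel in the original model came from.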
In general, to do this in deeper structures you have to play with padding, strides, and pooling. There are a lot of good resources online that explain how these operations work and how to apply them in Keras:
Padding and Stride for Convolutional Neural Networks
Pooling Layers for Convolutional Neural Networks
How to use the UpSampling2D and Conv2DTranspose

What is the correct way to upsample a [32x32x6] layer in a CNN

I have a CNN that produces a [32x32] image with 6 channels, but I need to upsample it to 256x256. I'm doing:
def upsample(filters, size):
    initializer = tf.random_normal_initializer(0., 0.02)
    result = tf.keras.Sequential()
    result.add(tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                               padding='same',
                                               kernel_initializer=initializer,
                                               use_bias=False))
    return result
Then I apply the layers like this:
up_stack = [
    upsample(6, 3),  # x2
    upsample(6, 3),  # x2
    upsample(6, 3)   # x2
]

for up in up_stack:
    finalLayer = up(finalLayer)
But this setup produces inaccurate results. Is there anything I'm doing wrong?
Your other option would be to use tf.keras.layers.UpSampling2D for your purpose, but that doesn't learn a kernel to upsample (it uses a fixed interpolation: nearest-neighbor by default, optionally bilinear); a sketch of that alternative follows below.
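For reference, a minimal sketch of that non-learned alternative (my addition, assuming the same 32x32x6 input as in the question):

import tensorflow as tf
from tensorflow.keras import layers

# Fixed (non-learned) 8x upsampling: 32x32 -> 256x256, zero parameters
inputs = layers.Input(shape=(32, 32, 6))
outputs = layers.UpSampling2D(size=(8, 8), interpolation="bilinear")(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs, name="upsample-only")
model.summary()  # (None, 256, 256, 6), 0 trainable params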
So, your approach of learning the upsampling with Conv2DTranspose is correct. But you have used a kernel_size of 3x3; it should be 2x2, and if you are not satisfied with the results, you should increase the number of filters, e.g. within the range [32, 256].
If you wish to use up-convolution, I suggest doing the following to achieve what you want. The following code works; just change the number of filters based on your need.
import tensorflow as tf
from tensorflow.keras import layers
# in = 32x32 out 256x256
inputs = layers.Input(shape=(32, 32, 6))
deconc01 = layers.Conv2DTranspose(256, kernel_size=2, strides=(2, 2), activation='relu')(inputs)
deconc02 = layers.Conv2DTranspose(256, kernel_size=2, strides=(2, 2), activation='relu')(deconc01)
outputs = layers.Conv2DTranspose(256, kernel_size=2, strides=(2, 2), activation='relu')(deconc02)
model = tf.keras.Model(inputs=inputs, outputs=outputs, name="up-conv")
Model: "up-conv"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 32, 32, 6)] 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 64, 64, 256) 6400
_________________________________________________________________
conv2d_transpose_1 (Conv2DTr (None, 128, 128, 256) 262400
_________________________________________________________________
conv2d_transpose_2 (Conv2DTr (None, 256, 256, 256) 262400
=================================================================
Total params: 531,200
Trainable params: 531,200
Non-trainable params: 0
_________________________________________________________________
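As a check on those parameter counts (my arithmetic, not from the answer): the first transposed convolution has 2*2*6*256 + 256 = 6,400 parameters, and each of the next two has 2*2*256*256 + 256 = 262,400, matching the summary.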