Counting the number of parameters in a GRU - TensorFlow 2.0

I have a GRU model as follows.
class CharGenModel(tf.keras.Model):
    def __init__(self, vocab_size, num_timesteps, embedding_dim, **kwargs):
        super(CharGenModel, self).__init__(**kwargs)
        self.embedding_layer = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.rnn_layer = tf.keras.layers.GRU(
            num_timesteps,
            recurrent_initializer="glorot_uniform",
            recurrent_activation="sigmoid",
            stateful=True,
            return_sequences=True
        )
        self.dense_layer = tf.keras.layers.Dense(vocab_size)

    def call(self, x):
        print(x.shape)
        x = self.embedding_layer(x)
        print(x.shape)
        x = self.rnn_layer(x)
        print(x.shape)
        x = self.dense_layer(x)
        print(x.shape)
        return x
vocab_size = 92
embedding_dim = 256
seq_length = 100
batch_size = 64
model = CharGenModel(vocab_size, seq_length, embedding_dim)
model.build(input_shape=(batch_size, seq_length))
model.summary()
Building the model and calling model.summary() produced the following shape printouts and trainable parameter counts.
(64, 100)
(64, 100, 256)
(64, 100, 100)
(64, 100, 92)
Model: "char_gen_model_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_4 (Embedding) multiple 23552
gru_4 (GRU) multiple 107400
dense_4 (Dense) multiple 9292
=================================================================
Total params: 140,244
Trainable params: 140,244
Non-trainable params: 0
I'm confused about two things.
According to the embedding layer definition:
tf.keras.layers.Embedding(
    input_dim,
    output_dim,
    embeddings_initializer='uniform',
    embeddings_regularizer=None,
    activity_regularizer=None,
    embeddings_constraint=None,
    mask_zero=False,
    input_length=None,
    **kwargs
)
the input_dim for the embedding layer in my application is 64x100.
(1) So why are the embedding layer's trainable parameters 92x256 = 23552? Why not 100x256?
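For reference (my addition, just restating the documented weight shape): an Embedding layer stores one row of size output_dim per vocabulary entry, so its weight matrix has shape (input_dim, output_dim) and the sequence length never enters the count. A quick check:

import tensorflow as tf

emb = tf.keras.layers.Embedding(input_dim=92, output_dim=256)
emb.build(input_shape=(None,))   # build is independent of sequence length
print(emb.weights[0].shape)      # (92, 256) -> 92 * 256 = 23552 parameters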
(2) The parameter count for a GRU is
num_params = number of FFNNs x [number of hidden units x (number of hidden units + number of inputs) + number of bias terms]
where the number of FFNNs (feed-forward networks) in a GRU is 3, the number of hidden units is 100, the number of inputs is 256, and the number of bias terms is 100, so
num_params = 3 x [100 x (100 + 256) + 100] = 107100
But the model summary reports 107400.
What am I missing in my calculation?
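For what it's worth, the missing 300 matches the extra bias terms Keras adds when reset_after=True, which is the TF2 default for GRU: it keeps separate input and recurrent bias vectors, giving 2 x 3 x units bias terms instead of 3 x units. A sketch that checks both variants with the sizes from the question:

import tensorflow as tf

def gru_params(units, input_dim, reset_after):
    gru = tf.keras.layers.GRU(units, reset_after=reset_after)
    gru.build((None, None, input_dim))
    return sum(w.numpy().size for w in gru.trainable_weights)

print(gru_params(100, 256, reset_after=False))  # 3*(100*(100+256) + 100)   = 107100
print(gru_params(100, 256, reset_after=True))   # 3*(100*(100+256) + 2*100) = 107400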

Related

GAN Discriminator model input shape issue

I am trying to create a simple GAN model, but I am getting an input error in the discriminator. Any suggestions or help are welcome.
seed(33)
tf.random.set_seed(432)
Generator
Input size (32,) output size (9,)
# Define the generator model
def build_generator():
    generator_input = Input(shape=(32,))
    x = Dense(16, activation='relu')(generator_input)
    x = Dense(9, activation='linear')(x)
    generator = Model(generator_input, x)
    return generator
Discriminator
The model has 2 embeddings, for batsman and bowler, and 7 other predictors, for a total of 9 predictors.
# Define the discriminator model
def build_discriminator():
    bowlerIdx_input = Input(shape=(1,), name='bowlerIdx')
    batsmanIdx_input = Input(shape=(1,), name='batsmanIdx')
    ballNum_input = Input(shape=(1,), name='ballNum')
    ballsRemaining_input = Input(shape=(1,), name='ballsRemaining')
    runs_input = Input(shape=(1,), name='runs')
    runRate_input = Input(shape=(1,), name='runRate')
    numWickets_input = Input(shape=(1,), name='numWickets')
    runsMomentum_input = Input(shape=(1,), name='runsMomentum')
    perfIndex_input = Input(shape=(1,), name='perfIndex')

    no_of_unique_batman = len(df1["batsmanIdx"].unique())
    print(no_of_unique_batman)
    no_of_unique_bowler = len(df1["bowlerIdx"].unique())
    print(no_of_unique_bowler)
    embedding_size_bat = no_of_unique_batman ** (1/4)
    print(embedding_size_bat)
    embedding_size_bwl = no_of_unique_bowler ** (1/4)
    print(embedding_size_bwl)

    # create embedding layers for the categorical predictors
    batsmanIdx_embedding = Embedding(input_dim=4742, output_dim=16, input_length=1)(batsmanIdx_input)
    print(batsmanIdx_embedding)
    batsmanIdx_flatten = Flatten()(batsmanIdx_embedding)
    print(batsmanIdx_flatten)
    bowlerIdx_embedding = Embedding(input_dim=3492, output_dim=16, input_length=1)(bowlerIdx_input)
    bowlerIdx_flatten = Flatten()(bowlerIdx_embedding)
    print(bowlerIdx_flatten)

    # concatenate all the predictors
    discriminator_input = keras.layers.concatenate([batsmanIdx_flatten, bowlerIdx_flatten, ballNum_input, ballsRemaining_input, runs_input, runRate_input, numWickets_input, runsMomentum_input, perfIndex_input])
    print(discriminator_input.shape)

    # add hidden layers
    x = Dense(64, activation='relu')(discriminator_input)
    x = Dense(32, activation='relu')(x)
    x = Dense(16, activation='relu')(x)
    x = Dense(8, activation='relu')(x)

    # add output layer
    output = Dense(1, activation='sigmoid', name='output')(x)

    # create model
    discriminator = Model(inputs=[batsmanIdx_input, bowlerIdx_input, ballNum_input, ballsRemaining_input, runs_input, runRate_input, numWickets_input, runsMomentum_input, perfIndex_input], outputs=output)
    return discriminator
GAN model
The generator output is fed to the discriminator. Synthetic and real data are concatenated and run through the discriminator, minimizing the loss.
# Define the GAN model
def build_gan(generator, discriminator):
    gan_input = Input(shape=(1024,))
    x = generator(gan_input)
    gan_output = discriminator(x)
    gan = Model(gan_input, gan_output)
    return gan
# Instantiate the generator, discriminator, and GAN models
generator = build_generator()
discriminator = build_discriminator()
discriminator.summary()
gan = build_gan(generator, discriminator)

# Compile the generator and discriminator models
discriminator.compile(optimizer=Adam(learning_rate=.0002, beta_1=0.5), loss='binary_crossentropy', metrics=['accuracy'])
gan.compile(optimizer=Adam(learning_rate=.0005, beta_1=0.5), loss='binary_crossentropy', metrics=['accuracy'])

# Set the batch size and number of epochs
batch_size = 1024
num_epochs = 20

# Store the losses over time
gen_losses = []
dis_losses = []
gen_acc = []
dis_acc = []

# Train the GAN
for epoch in range(num_epochs):
    # Generate synthetic data
    synthetic_data = generator.predict(np.random.randn(batch_size, 1024))
    # Concatenate synthetic data with real data
    real_data = train_dataset1.values
    data = np.concatenate((synthetic_data, real_data))
    # Create labels for synthetic and real data
    labels = np.concatenate((np.zeros(batch_size), np.ones(len(real_data))))
    # Train the discriminator on synthetic and real data
    d_loss = discriminator.fit([data['batsmanIdx'], data['bowlerIdx'], data['ballNum'], data['ballsRemaining'], data['runs'],
                                data['runRate'], data['numWickets'], data['runsMomentum'], data['perfIndex']], labels)
    #d_loss = discriminator.train_on_batch(data, labels)
    dis_losses.append(d_loss[0])
    dis_acc.append(d_loss[1])
    # Generate random noise for the generator
    random_noise = np.random.randn(batch_size, 1024)
    # Create labels for the generator (all ones, since we want the generator to fool the discriminator)
    generator_labels = np.ones(batch_size)
    # Train the generator
    g_loss = gan.train_on_batch(random_noise, generator_labels)
    gen_losses.append(g_loss[0])
    gen_acc.append(g_loss[1])
    # Print loss values for each epoch
    print(f'Epoch: {epoch+1}, Discriminator Loss: {d_loss}, Generator Loss: {g_loss}')
The output and error I get are:
Model: "model_56"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
batsmanIdx (InputLayer) [(None, 1)] 0 []
bowlerIdx (InputLayer) [(None, 1)] 0 []
embedding_2 (Embedding) (None, 1, 16) 75872 ['batsmanIdx[0][0]']
embedding_3 (Embedding) (None, 1, 16) 55872 ['bowlerIdx[0][0]']
flatten_2 (Flatten) (None, 16) 0 ['embedding_2[0][0]']
flatten_3 (Flatten) (None, 16) 0 ['embedding_3[0][0]']
ballNum (InputLayer) [(None, 1)] 0 []
ballsRemaining (InputLayer) [(None, 1)] 0 []
runs (InputLayer) [(None, 1)] 0 []
runRate (InputLayer) [(None, 1)] 0 []
numWickets (InputLayer) [(None, 1)] 0 []
runsMomentum (InputLayer) [(None, 1)] 0 []
perfIndex (InputLayer) [(None, 1)] 0 []
concatenate_28 (Concatenate) (None, 39) 0 ['flatten_2[0][0]',
'flatten_3[0][0]',
'ballNum[0][0]',
'ballsRemaining[0][0]',
'runs[0][0]',
'runRate[0][0]',
'numWickets[0][0]',
'runsMomentum[0][0]',
'perfIndex[0][0]']
dense_228 (Dense) (None, 64) 2560 ['concatenate_28[0][0]']
dropout_111 (Dropout) (None, 64) 0 ['dense_228[0][0]']
dense_229 (Dense) (None, 32) 2080 ['dropout_111[0][0]']
dropout_112 (Dropout) (None, 32) 0 ['dense_229[0][0]']
dense_230 (Dense) (None, 16) 528 ['dropout_112[0][0]']
dropout_113 (Dropout) (None, 16) 0 ['dense_230[0][0]']
dense_231 (Dense) (None, 8) 136 ['dropout_113[0][0]']
dropout_114 (Dropout) (None, 8) 0 ['dense_231[0][0]']
output (Dense) (None, 1) 9 ['dropout_114[0][0]']
==================================================================================================
Total params: 137,057
Trainable params: 137,057
Non-trainable params: 0
__________________________________________________________________________________________________
WARNING:tensorflow:Model was constructed with shape (None, 1) for
input KerasTensor(type_spec=TensorSpec(shape=(None, 1),
dtype=tf.float32, name='batsmanIdx'), name='batsmanIdx',
description="created by layer 'batsmanIdx'"), but it was called
on an input with incompatible shape (None, 9).
(None, 9)
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-30-8cbe40cd27bc> in <module>
94 print("111")
95 discriminator.summary()
---> 96 gan = build_gan(generator, discriminator)
97
98 # Compile the generator and discriminator models
2 frames
/usr/local/lib/python3.8/dist-packages/keras/engine/functional.py in _run_internal_graph(self, inputs, training, mask)
677 for x in self.outputs:
678 x_id = str(id(x))
--> 679 assert x_id in tensor_dict, "Could not compute output " + str(x)
680 output_tensors.append(tensor_dict[x_id].pop())
681
AssertionError: Exception encountered when calling layer "model_56" (type Functional).
Could not compute output KerasTensor(type_spec=TensorSpec(shape=(None, 1), dtype=tf.float32, name=None), name='output/Sigmoid:0', description="created by layer 'output'")
Call arguments received by layer "model_56" (type Functional):
• inputs=tf.Tensor(shape=(None, 9), dtype=float32)
• training=None
• mask=None
I don't understand what this message means. I am supposed to pass in all 9 predictors, but for some reason the model only picks up the first input (see the warning above, where batsmanIdx is called with shape (None, 9)).
All thoughts and suggestions are welcome.
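For context on the shape mismatch (an illustrative sketch, not from the original post): build_gan feeds a single noise tensor straight into the discriminator, but the discriminator was defined with nine separate Input layers, and the generator itself expects input of shape (32,), not (1024,). One way to wire the GAN up, assuming the generator's nine output columns correspond to the nine predictors in order, is to split its output before calling the discriminator:

import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

# Hypothetical build_gan that matches both models' signatures
def build_gan(generator, discriminator):
    gan_input = Input(shape=(32,))   # must match the generator's input size
    x = generator(gan_input)         # shape (None, 9)
    # split the single (None, 9) tensor into nine (None, 1) tensors,
    # one per discriminator input
    features = tf.split(x, num_or_size_splits=9, axis=1)
    gan_output = discriminator(features)
    return Model(gan_input, gan_output)

With this wiring, the training loop's noise would also need shape (batch_size, 32) rather than (batch_size, 1024).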

ValueError: Input 0 of layer "model" is incompatible with the layer: expected shape=(None, 50), found shape=(None, 1, 512)

I am learning to use bert-base-cased with a classification model... the code for the model is the following:
def mao_func(input_ids, masks, labels):
    return {'input_ids': input_ids, 'attention_mask': masks}, labels

dataset = dataset.map(mao_func)
BATCH_SIZE = 32
dataset = dataset.shuffle(100000).batch(BATCH_SIZE)
split = .8
ds_len = len(list(dataset))
train = dataset.take(round(ds_len * split))
val = dataset.skip(round(ds_len * split))

from transformers import TFAutoModel
bert = TFAutoModel.from_pretrained('bert-base-cased')
Model: "tf_bert_model"
Layer (type) Output Shape Param #
bert (TFBertMainLayer) multiple 108310272
=================================================================
Total params: 108,310,272
Trainable params: 108,310,272
Non-trainable params: 0
then the NN building:
input_ids = tf.keras.layers.Input(shape=(50,), name='input_ids', dtype='int32')
mask = tf.keras.layers.Input(shape=(50,), name='attention_mask', dtype='int32')
embeddings = bert(input_ids, attention_mask=mask)[0]
X = tf.keras.layers.GlobalMaxPool1D()(embeddings)
X = tf.keras.layers.BatchNormalization()(X)
X = tf.keras.layers.Dense(128, activation='relu')(X)
X = tf.keras.layers.Dropout(0.1)(X)
X = tf.keras.layers.Dense(32, activation='relu')(X)
y = tf.keras.layers.Dense(3, activation='softmax',name='outputs')(X)
model = tf.keras.Model(inputs=[input_ids, mask], outputs=y)
model.layers[2].trainable = False
the model.summary is:
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_ids (InputLayer) [(None, 50)] 0 []
attention_mask (InputLayer) [(None, 50)] 0 []
tf_bert_model (TFBertModel) TFBaseModelOutputWi 108310272 ['input_ids[0][0]',
thPoolingAndCrossAt 'attention_mask[0][0]']
tentions(last_hidde
n_state=(None, 50,
768),
pooler_output=(Non
e, 768),
past_key_values=No
ne, hidden_states=N
one, attentions=Non
e, cross_attentions
=None)
global_max_pooling1d (GlobalMa (None, 768) 0 ['tf_bert_model[0][0]']
xPooling1D)
batch_normalization (BatchNorm (None, 768) 3072 ['global_max_pooling1d[0][0]']
alization)
dense (Dense) (None, 128) 98432 ['batch_normalization[0][0]']
dropout_37 (Dropout) (None, 128) 0 ['dense[0][0]']
dense_1 (Dense) (None, 32) 4128 ['dropout_37[0][0]']
outputs (Dense) (None, 3) 99 ['dense_1[0][0]']
==================================================================================================
Total params: 108,416,003
Trainable params: 104,195
Non-trainable params: 108,311,808
__________________________________________________________________________________________________
finally, the model fitting is:
optimizer = tf.keras.optimizers.Adam(0.01)
loss = tf.keras.losses.CategoricalCrossentropy()
acc = tf.keras.metrics.CategoricalAccuracy('accuracy')
model.compile(optimizer,loss=loss, metrics=[acc])
history = model.fit(
    train,
    validation_data=val,
    epochs=140
)
with an execution error at line 7, the model.fit(...) call:
ValueError: Input 0 of layer "model" is incompatible with the layer: expected shape=(None, 50), found shape=(None, 1, 512)
Can anyone be so kind as to help me with what I did wrong and why... thanks :)
update: here is the git with the codes https://github.com/CharlieArreola/OnlinePosts
It seems that the shape of your training data doesn't match the expected input shape of your input layer.
You can check the shape of your training data via its .shape attribute (for a tf.data.Dataset, inspect train.element_spec).
Your input layer input_ids = tf.keras.layers.Input(shape=(50,), name='input_ids', dtype='int32') expects training data with 50 columns, but judging by your error you most likely have 512.
So to fix this, you could simply change your input shape:
input_ids = tf.keras.layers.Input(shape=(512,), name='input_ids', dtype='int32')
If you split your x and y in your dataset, you can make it more flexible with:
input_ids = tf.keras.layers.Input(shape=(train_x.shape[1],), name='input_ids', dtype='int32')
Also don't forget that you have to apply this change to all of your input layers!
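Alternatively (my addition, assuming the 512-length sequences come from the tokenization step), you can keep the (50,) inputs and truncate/pad at tokenization time instead, so the data matches the model; the extra dimension in (None, 1, 512) also suggests the tensors need a squeeze or reshape before batching. A sketch, where texts is a hypothetical list of raw strings:

from transformers import AutoTokenizer

SEQ_LEN = 50  # must match the Input(shape=(SEQ_LEN,)) layers
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')

enc = tokenizer(texts,
                max_length=SEQ_LEN,
                truncation=True,
                padding='max_length',
                return_tensors='np')
# enc['input_ids'] and enc['attention_mask'] now have shape (num_samples, 50)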

keras define a trainable variable for add or matmul

I have some problems using tf.keras to build a model. I want to define a trainable weight tensor with shape (64, 128), similar to tf.get_variable. However, I can't achieve it.
In the past, I have tried many methods, but I am looking for an easier one.
inputs = tf.keras.Input((128,))
weights = tf.Variable(tf.random.normal((64, 128)))
output = tf.keras.layers.Lambda(lambda x: tf.matmul(x, tf.transpose(weights)))(inputs)
model = tf.keras.Model(inputs, output)
model.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_10 (InputLayer) (None, 128) 0
_________________________________________________________________
lambda_2 (Lambda) (None, 64) 0
=================================================================
Total params: 0
Trainable params: 0
Non-trainable params: 0
The defined weights are not trainable.
In addition, I know Dense gives a trainable weight matrix and bias. But if I only want to add a bias, I can't use Dense.
Instead, I have to use add_weight in a custom layer, for example:
class Bias(keras.layers.Layer):
    def build(self, input_shape):
        self.bias = self.add_weight(shape=(64, 128), initializer='zeros', dtype=tf.float32, name='x')
        self.built = True

    def call(self, inputs):
        return inputs + self.bias

inputs = Input(shape=(64, 128))
outputs = Bias()(inputs)
model = Model(inputs=inputs, outputs=outputs)
model.summary()
Layer (type) Output Shape Param #
=================================================================
input_11 (InputLayer) (None, 64, 128) 0
_________________________________________________________________
bias_5 (Bias) (None, 64, 128) 8192
=================================================================
Total params: 8,192
Trainable params: 8,192
Non-trainable params: 0
Is there an easier way to define a trainable variable?
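For background (my note, not part of the original question): a Lambda layer does not track variables created outside of it, which is why the first summary reports 0 parameters. One lighter-weight pattern in TF2 is to assign the tf.Variable as an attribute of a subclassed layer, where it is tracked automatically, with no add_weight call required:

import tensorflow as tf

class MatMulWeight(tf.keras.layers.Layer):
    # Sketch: a plain tf.Variable assigned as a Layer attribute is
    # collected into trainable_weights automatically.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.w = tf.Variable(tf.random.normal((64, 128)), trainable=True)

    def call(self, x):
        return tf.matmul(x, self.w, transpose_b=True)

inputs = tf.keras.Input((128,))
outputs = MatMulWeight()(inputs)
model = tf.keras.Model(inputs, outputs)
model.summary()  # now reports 8,192 trainable parameters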

subclass of tf.keras.Model can not get summay() result

I want to build a subclass of tf.keras.Model and see the model structure with the summary function, but it doesn't work. The following is my code:
import tensorflow as tf

class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(128, activation='relu')
        self.d2 = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

model = MyModel()
model.summary()
The error:
ValueError: This model has not yet been built. Build the model first
by calling build() or calling fit() with some data, or specify an
input_shape argument in the first layer(s) for automatic build.
You need to call each layer once to infer shapes, and then call the build() method of tf.keras.Model with the model's input shape as an argument:
import tensorflow as tf
import numpy as np

class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(128, activation='relu')
        self.d2 = tf.keras.layers.Dense(10, activation='softmax')
        x = np.random.normal(size=(1, 32, 32, 3))
        x = tf.convert_to_tensor(x)
        _ = self.call(x)

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

model = MyModel()
model.build((32, 32, 3))
model.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) multiple 896
_________________________________________________________________
flatten (Flatten) multiple 0
_________________________________________________________________
dense (Dense) multiple 3686528
_________________________________________________________________
dense_1 (Dense) multiple 1290
=================================================================
Total params: 3,688,714
Trainable params: 3,688,714
Non-trainable params: 0
_________________________________________________________________
A better solution is listed here: you need to provide a model() method that builds the model explicitly.
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

class MyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(1)

    def call(self, inputs, **kwargs):
        return self.dense(inputs)

    def model(self):
        x = Input(shape=(1,))
        return Model(inputs=[x], outputs=self.call(x))

MyModel().model().summary()
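As a side note (my addition, not from the original answer): because model() returns a regular functional Model, other graph utilities work on it as well, for example:

tf.keras.utils.plot_model(MyModel().model(), show_shapes=True)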
Editing @Vlad's answer to avoid this error: ValueError: Input 0 of layer conv2d_10 is incompatible with the layer: expected min_ndim=4, found ndim=3. Full shape received: (32, 32, 3)
Change this line from:
model.build((32, 32, 3))
To:
model.build((None, 32, 32, 3))
Final Code:
import numpy as np
import tensorflow as tf

class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(128, activation='relu')
        self.d2 = tf.keras.layers.Dense(10, activation='softmax')
        x = np.random.normal(size=(1, 32, 32, 3))
        x = tf.convert_to_tensor(x)
        _ = self.call(x)

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

model = MyModel()
model.build((None, 32, 32, 3))
model.summary()

How to use keras layers in custom keras layer

I am trying to write my own keras layer. In this layer, I want to use some other keras layers. Is there any way to do something like this:
class MyDenseLayer(tf.keras.layers.Layer):
    def __init__(self, num_outputs):
        super(MyDenseLayer, self).__init__()
        self.num_outputs = num_outputs

    def build(self, input_shape):
        self.fc = tf.keras.layers.Dense(self.num_outputs)

    def call(self, input):
        return self.fc(input)

layer = MyDenseLayer(10)
When I do something like
input = tf.keras.layers.Input(shape = (16,))
output = MyDenseLayer(10)(input)
model = tf.keras.Model(inputs = [input], outputs = [output])
model.summary()
it outputs a summary in which the Dense layer's weights do not show up as trainable parameters.
How do I make the weights of the Dense layer in there trainable?
If you look at the documentation for how to add custom layers, they recommend that you use the .add_weight(...) method. This method internally places all weights in self._trainable_weights. So to do what you want, you must first define the Keras layers you want to use, build them, copy the weights, and then build your own layer. If I update your code, it should be something like:
class mylayer(tf.keras.layers.Layer):
    def __init__(self, num_outputs):
        self.num_outputs = num_outputs
        super(mylayer, self).__init__()

    def build(self, input_shape):
        self.fc = tf.keras.layers.Dense(self.num_outputs)
        self.fc.build(input_shape)
        self._trainable_weights = self.fc.trainable_weights
        super(mylayer, self).build(input_shape)

    def call(self, input):
        return self.fc(input)

layer = mylayer(10)
input = tf.keras.layers.Input(shape=(16,))
output = layer(input)
model = tf.keras.Model(inputs=[input], outputs=[output])
model.summary()
You should then get what you want.
It's much more comfortable and concise to put existing layers in the tf.keras.Model class. If you define non-custom layers such as Dense or Conv2D inside a custom Layer instead, the parameters of those layers are not trainable by default.
class MyDenseLayer(tf.keras.Model):
    def __init__(self, num_outputs):
        super(MyDenseLayer, self).__init__()
        self.num_outputs = num_outputs
        self.fc = tf.keras.layers.Dense(num_outputs)

    def call(self, input):
        return self.fc(input)

    def compute_output_shape(self, input_shape):
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.num_outputs
        return tf.TensorShape(shape)

layer = MyDenseLayer(10)
Check this tutorial: https://www.tensorflow.org/guide/keras#model_subclassing
In the TF2 custom-layer guide, they "recommend creating such sublayers in the __init__ method (since the sublayers will typically have a build method, they will be built when the outer layer gets built)." So just moving the creation of self.fc into __init__ will give you what you want.
class MyDenseLayer(tf.keras.layers.Layer):
    def __init__(self, num_outputs):
        super(MyDenseLayer, self).__init__()
        self.num_outputs = num_outputs
        self.fc = tf.keras.layers.Dense(self.num_outputs)

    def build(self, input_shape):
        self.built = True

    def call(self, input):
        return self.fc(input)

input = tf.keras.layers.Input(shape=(16,))
output = MyDenseLayer(10)(input)
model = tf.keras.Model(inputs=[input], outputs=[output])
model.summary()
Output:
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 16)] 0
_________________________________________________________________
my_dense_layer_2 (MyDenseLay (None, 10) 170
=================================================================
Total params: 170
Trainable params: 170
Non-trainable params: 0
This works for me and is clean, concise, and readable.
import tensorflow as tf

class MyDense(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(MyDense, self).__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(2, tf.keras.activations.relu)

    def call(self, inputs, training=None):
        return self.dense(inputs)

inputs = tf.keras.Input(shape=10)
outputs = MyDense(trainable=True)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs, name='test')
model.compile(loss=tf.keras.losses.MeanSquaredError())
model.summary()
Output:
Model: "test"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 10)] 0
_________________________________________________________________
my_dense (MyDense) (None, 2) 22
=================================================================
Total params: 22
Trainable params: 22
Non-trainable params: 0
_________________________________________________________________
Note that trainable=True is needed. I have posted a question about it here.