How to get the shape / dimension of the layers? - cntk

When I have defined a model like this:
def create_basic_model_terse(input, out_dims):
    with default_options(activation=relu):
        model = Sequential([
            LayerStack(3, lambda i: [
                Convolution((5,5), [32,32,64][i], init=glorot_uniform(), pad=True),
                MaxPooling((3,3), strides=(2,2))
            ]),
            Dense(64, init=glorot_uniform()),
            Dense(out_dims, init=glorot_uniform(), activation=None)
        ])
    return model(input)
How can I get some kind of information about each layer in the network like output shape / dimensions?

You can look at the CNTK 202 tutorial. Other tutorials, such as CNTK 105, also show how to get different attributes of models.
For a model
def create_model():
    with default_options(initial_state=0.1):
        return Sequential([
            Embedding(emb_dim),
            Recurrence(LSTM(hidden_dim), go_backwards=False),
            Dense(num_labels)
        ])

model = create_model()
print(len(model.layers))
print(model.layers[0].E.shape)
print(model.layers[2].b.value)
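If you want a quick overview of every layer, here is a minimal sketch (assuming the CNTK Python API, where each entry of model.layers is a Function whose learnable tensors are exposed via .parameters; attribute names such as E, W, and b depend on the layer type):
for i, layer in enumerate(model.layers):
    # print the layer name plus the name and shape of each parameter tensor
    print(i, layer.name)
    for p in layer.parameters:
        print('   ', p.name, p.shape)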

Related

Keras feature extractor clarification - which layers does an input go through

When extracting a model layer's output as in the TensorFlow Sequential model documentation example below, does the input x in the code pass through my_first_layer before going into the my_intermediate_layer layer? Or does it go directly into my_intermediate_layer without passing through my_first_layer?
If it went directly into my_intermediate_layer, its input would not have the transformation done by my_first_layer's Conv2D. That seems wrong to me, because the input should go through all the preceding layers.
Please help me understand which layers x goes through.
Feature extraction with a Sequential model
initial_model = keras.Sequential(
    [
        keras.Input(shape=(250, 250, 3)),
        layers.Conv2D(32, 5, strides=2, activation="relu", name="my_first_layer"),
        layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"),
        layers.Conv2D(32, 3, activation="relu"),
    ]
)
# The model goes through the training.
...
# Feature extractor
feature_extractor = keras.Model(
    inputs=initial_model.inputs,
    outputs=initial_model.get_layer(name="my_intermediate_layer").output,
)
# Call feature extractor on test input.
x = tf.ones((1, 250, 250, 3))
features = feature_extractor(x)
Keras offers a higher-level API that runs on top of the TensorFlow machine learning platform. Keras provides two classes for defining a neural network model: the Sequential class and the Model class.
Sequential Class:
It groups a linear stack of layers into a model, where each layer has exactly one input tensor and one output tensor. You can add the required layers to the model one at a time (schema 1), as shown below, and they execute sequentially, as the name suggests:
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
model.add(tf.keras.layers.Dense(4))
The second schema for defining a Sequential model (schema 2) passes the list of layers to the constructor, as shown below:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential(
    [
        layers.Dense(2, activation="relu", name="layer1"),
        layers.Dense(3, activation="relu", name="layer2"),
        layers.Dense(4, name="layer3"),
    ]
)
# Call model on a test input
x = tf.ones((3, 3))
y = model(x)
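Calling the model on a test input builds its weights, so a quick check of the result is possible; the shapes below follow directly from the layer definitions above:
print(y.shape)             # (3, 4): batch of 3 samples, last Dense has 4 units
print(len(model.weights))  # 6: one kernel and one bias per Dense layer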
Model Class
It allows the user to build a custom model from an arbitrary graph of layers, as shown below:
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
It also allows one to create a new functional-API model from additional layers, as follows:
inputs = keras.Input(shape=(None, None, 3))
processed = keras.layers.RandomCrop(width=32, height=32)(inputs)
conv = keras.layers.Conv2D(filters=2, kernel_size=3)(processed)
pooling = keras.layers.GlobalAveragePooling2D()(conv)
feature = keras.layers.Dense(10)(pooling)
full_model = keras.Model(inputs, feature)  # build the model from input to feature
Note: the input tensors support dicts, lists, or tuples, but not lists of lists or dicts of dicts.
To answer the question directly: the feature extractor reuses the layers of initial_model, so x does pass through my_first_layer before reaching my_intermediate_layer; an intermediate layer's output always includes the transformations of all preceding layers.
I hope that this helps.

Calling a model inside a model with TensorFlow: layers do not support float type inputs

For learning purposes, I tried to use TensorFlow Probability to fit a simple one-dimensional function (x, y), but I have realized I am doing something wrong when I call a model inside another model, and I really need to understand what.
Here is my model:
mlp1 = tf.keras.Sequential(
    [tf.keras.layers.Input(shape=(1), dtype=tf.float32),
     Dense(20, activation="relu", name="layer1"),
     Dense(20, activation="relu", name="layer2"),
     Dense(1, activation="softplus", name="layer3"),
    ]
)

class subq0(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.mlp1 = mlp1()

    def call(self, inputs):
        sigma = self.mlp1(inputs)
        return sigma

polynom = subq0()
optimizer = tf.optimizers.SGD(learning_rate=0.0001, momentum=0.9)
polynom.compile(optimizer=optimizer, loss="mse")
polynom.build(input_shape=(1,))
polynom.summary()
polynomial.fit(X_train, y_train, epochs=100, verbose=0, batch_size=64, validation_data=(X_val, y_val))
The error I get is:
ValueError: You cannot build your model by calling build if your
layers do not support float type inputs. Instead, in order to
instantiate and build your model, call your model on real tensor data
(of the correct dtype).
The actual error from call is:
'tensorflow.python.framework.ops.EagerTensor' object is not callable.
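A likely cause, offered here as an observation rather than the thread's accepted answer: mlp1 is already a Sequential instance, so self.mlp1 = mlp1() calls it without an input tensor, which leads to the "not callable" failure. A minimal hedged sketch of the corrected wrapper (variable names follow the question; note that polynomial in the fit call should also read polynom):
class Subq0(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.mlp1 = mlp1  # store the model instance; do not call it here

    def call(self, inputs):
        return self.mlp1(inputs)  # call it on real tensor data inside call()

polynom = Subq0()
polynom.compile(optimizer=tf.optimizers.SGD(learning_rate=0.0001, momentum=0.9), loss="mse")
_ = polynom(tf.ones((1, 1)))  # build by calling on data of the correct dtype
polynom.summary()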

Categorical_crossentropy loss function has a value of 0.0000e+00 for a BiLSTM sentiment analysis model

This is the graph of my model (graph image omitted).
Code:
def model_creation(vocab_size, embedding_dim, embedding_matrix,
                   rnn_units, batch_size,
                   train_embed=False):
    model = Sequential(
        [
            Embedding(vocab_size, embedding_dim,
                      weights=[embedding_matrix], trainable=train_embed, mask_zero=True),
            Bidirectional(LSTM(rnn_units, return_sequences=True, dropout=0.5)),
            Bidirectional(LSTM(rnn_units, dropout=0.25)),
            Dense(1, activation="softmax")
        ])
    return model
The embedding layer receives an embedding matrix with values from Word2Vec.
This is the code for the embedding matrix:
def create_embedding_matrix(encoder, dict_w2v):
    embedding_dim = 50
    embedding_matrix = np.zeros((encoder.vocab_size, embedding_dim))
    for word in encoder.tokens:
        embedding_vector = dict_w2v.get(word)
        if embedding_vector is not None:  # dictionary contains word
            token_id = encoder.encode(word)[0]
            embedding_matrix[token_id] = embedding_vector
    return embedding_matrix
Dataset
I'm using the Amazon product dataset: https://jmcauley.ucsd.edu/data/amazon/
This is what the dataframe looks like (screenshot omitted).
I'm only interested in overall and reviewText.
overall is my label and reviewText is my feature.
overall has a range of [1, 5].
Problem
During training with the categorical_crossentropy loss, the loss stays at 0.0000e+00; I don't think the loss is being minimized at all, and accuracy is stuck at 0.1172.
Did I configure my model wrong, or is there some other problem? How do I fix this loss issue? Please tell me if this isn't clear enough and I'll provide more information; I'm not sure what the problem is.
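One thing worth checking, offered as an observation rather than the thread's answer: a softmax over a single unit always outputs 1.0, so categorical_crossentropy against a Dense(1, activation="softmax") head collapses to zero loss. A hedged sketch of a head matching the five overall ratings (assuming the variables from model_creation above, and integer labels shifted from 1..5 to 0..4):
model = Sequential([
    Embedding(vocab_size, embedding_dim,
              weights=[embedding_matrix], trainable=train_embed, mask_zero=True),
    Bidirectional(LSTM(rnn_units, return_sequences=True, dropout=0.5)),
    Bidirectional(LSTM(rnn_units, dropout=0.25)),
    Dense(5, activation="softmax"),  # one unit per rating class; Dense(1, softmax) is always 1.0
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",  # integer labels 0..4
              metrics=["accuracy"])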

How to extract the hidden vector (the output of the ReLU after the third encoder layer) as the image representation

I am implementing an autoencoder using the Fashion MNIST dataset. The code for the autoencoder:
class MNISTClassifier(Model):
    def __init__(self):
        super(MNISTClassifier, self).__init__()
        self.encoder = Sequential([
            layers.Dense(128, activation="relu"),
            layers.Dense(64, activation="relu"),
            layers.Dense(32, activation="relu")
        ])
        self.decoder = Sequential([
            layers.Dense(64, activation="relu"),
            layers.Dense(128, activation="relu"),
            layers.Dense(784, activation="relu")
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = MNISTClassifier()
Now I want to train an SVM classifier on the image representations extracted from the above autoencoder. Once the fully-connected autoencoder is trained, for each image I want to extract the 32-dimensional hidden vector (the output of the ReLU after the third encoder layer) as the image representation, and then train a linear SVM classifier on the Fashion MNIST training images using these 32-dimensional features.
How do I extract this 32-dimensional hidden vector?
Thanks in advance!
I recommend using the Functional API to define multiple outputs for your model, because it makes for clearer code. However, you can also do this with a Sequential model by taking the output of any layer you want and adding it to your model's outputs.
Print model.summary() and check the layers to find the one you want to branch from. You can access each layer's output by its index with model.layers[index].output.
Then you can create a multi-output model of the layers you want, like this:
third_layer = model.layers[2]
last_layer = model.layers[-1]
my_model = Model(inputs=model.input, outputs=(third_layer.output, last_layer.output))
Then you can access the outputs of both layers you have defined:
third_layer_predict, last_layer_predict = my_model.predict(X_test)
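For this particular subclassed autoencoder, the 32-dimensional code is simply the output of the encoder sub-model, so a hedged alternative sketch (assuming the autoencoder has been trained, X_train/X_test are flattened to 784 features, and y_train/y_test are the class labels) could skip the multi-output model entirely:
from sklearn.svm import LinearSVC

# The encoder Sequential already ends at the 32-unit ReLU layer.
codes_train = autoencoder.encoder(X_train).numpy()  # shape (n_samples, 32)
codes_test = autoencoder.encoder(X_test).numpy()

svm = LinearSVC()  # linear SVM on the 32-dimensional features
svm.fit(codes_train, y_train)
print(svm.score(codes_test, y_test))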

models.set_weights() gives None

I am trying to build an ensemble DNN model. I train, e.g., 5 models, take the weights, and average them. After that I wanted to clone the first model and assign the averaged weights to it, but it does not work.
The Model is built like this:
def build_DNN_model(self):
    # initialize the DNN
    ann = tf.keras.models.Sequential()
    # add first hidden layer
    num_neurons = self.num_neurons
    ann.add(tf.keras.layers.Dense(units=num_neurons, activation='relu',
                                  kernel_initializer=tf.constant_initializer(1.)))
    ann.add(tf.keras.layers.Dropout(0.5))
    # add second hidden layer
    ann.add(tf.keras.layers.Dense(units=num_neurons, activation='relu'))
    ann.add(tf.keras.layers.Dropout(0.5))
    # add output layer
    ann.add(tf.keras.layers.Dense(units=1))
    # compile
    ann.compile(optimizer='adam', loss='mean_squared_error')
    return ann
Then each model is fitted to the data; I actually build 5 models and fit all of them to the same data.
After that I create a list of Keras model objects, called members.
Now I would like to assign my new weights to a clone of one of the models. But even if I do:
members[0].set_weights(members[0].get_weights())
it returns None.
I am using TensorFlow 2.
I would appreciate your help very much.
Note that set_weights updates the weights in place and always returns None; to verify it worked, compare get_weights() afterwards. You should also define the input shape in the first layer of your model, since the weights are not created until the input shape is known.
After doing this, I simply create two models like yours (m1, m2) and assign m1's weights to m2; they are the same:
def build_DNN_model(input_dim):
    # initialize the DNN
    ann = tf.keras.models.Sequential()
    # add first hidden layer
    num_neurons = 32
    ann.add(tf.keras.layers.Dense(units=num_neurons, activation='relu',
                                  kernel_initializer=tf.constant_initializer(1.),
                                  input_dim=input_dim))
    ann.add(tf.keras.layers.Dropout(0.5))
    # add second hidden layer
    ann.add(tf.keras.layers.Dense(units=num_neurons, activation='relu'))
    ann.add(tf.keras.layers.Dropout(0.5))
    # add output layer
    ann.add(tf.keras.layers.Dense(units=1))
    # compile
    ann.compile(optimizer='adam', loss='mean_squared_error')
    return ann

m1 = build_DNN_model(100)
m2 = build_DNN_model(100)
m2.set_weights(m1.get_weights())

# check the weights
[(w1 == w2).all() for w1, w2 in zip(m1.get_weights(), m2.get_weights())]
# [True, True, True, True, True, True]
EDIT1: assign random weights to m1:
m1.set_weights([np.random.uniform(0,1, i.shape) for i in m1.get_weights()])
EDIT2: here you can find a working implementation of model_weight_ensemble for your context, from https://machinelearningmastery.com/polyak-neural-network-model-weight-ensemble/
Creating a simple model:
def create_model1():
    model = tf.keras.Sequential()
    model.add(tf.keras.Input(shape=(13,)))
    model.add(tf.keras.layers.Dense(units=6, activation='relu', name='d1'))
    model.add(tf.keras.layers.Dense(units=2, activation='softmax', name='d2'))
    return model
Model architecture: (summary image omitted)
Looking at the layers:
model.layers
Output:
[<tensorflow.python.keras.layers.core.Dense at 0x2193acc95c8>,
 <tensorflow.python.keras.layers.core.Dense at 0x2193ad3ad08>]
Looking at the weights of the 2nd dense layer:
model.layers[1].weights
Output:
[<tf.Variable 'd2/kernel:0' shape=(6, 2) dtype=float32, numpy=
 array([[ 0.11061734,  0.61788374],
        [ 0.31208295,  0.19295567],
        [-0.6812483 ,  0.05383837],
        [ 0.39284903,  0.69312006],
        [-0.519426  ,  0.67820543],
        [-0.7337165 ,  0.11025453]], dtype=float32)>,
 <tf.Variable 'd2/bias:0' shape=(2,) dtype=float32, numpy=array([0., 0.], dtype=float32)>]
Setting weights:
new_weights = [tf.random.uniform(shape = (6,2)), tf.random.uniform(shape = (2,))]
model.layers[1].set_weights(new_weights)
When setting weights, the shapes in new_weights must match the shapes of that particular layer's weights.
Here, new_weights is a list containing two arrays: the first element is the kernel weights and the second is the bias weights.
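Since the original goal was to average the weights of several trained members, here is a minimal hedged sketch of that step (assuming members is a list of identically-structured, already-built Keras models, and build_DNN_model is the function defined above):
import numpy as np

def average_member_weights(members):
    # element-wise mean of corresponding weight arrays across all models
    weight_lists = [m.get_weights() for m in members]
    return [np.mean(layer_weights, axis=0) for layer_weights in zip(*weight_lists)]

avg_model = build_DNN_model(100)  # same architecture as the members
avg_model.set_weights(average_member_weights(members))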