How to add InstanceNormalization in TensorFlow/Keras - tensorflow

I am new to TensorFlow and Keras. I have been building a dilated ResNet and wanted to add instance normalization to a layer, but I could not, as it keeps throwing errors.
I am using tensorflow 1.15 and keras 2.1. I commented out the BatchNormalization part, which works, and tried to add instance normalization instead, but it cannot find the module.
Thanks a lot for your suggestions
from keras.layers import Conv2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Nadam, Adam
from keras.layers import Input, Dense, Reshape, Activation, Flatten, Embedding, Dropout, Lambda, add, concatenate, Concatenate, ConvLSTM2D, LSTM, average, MaxPooling2D, multiply, MaxPooling3D
from keras.layers import GlobalAveragePooling2D, Permute
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.layers.convolutional import UpSampling2D, Conv2D, Conv1D
from keras.models import Sequential, Model
from keras.utils import multi_gpu_model
from keras.utils.generic_utils import Progbar
from keras.constraints import maxnorm
from keras.activations import tanh, softmax
from keras import metrics, initializers, utils, regularizers
import tensorflow as tf
import numpy as np
import math
import os
import sys
import random
import keras.backend as K
epsilon = K.epsilon()
def basic_block_conv2D_norm_elu(filters, kernel_size, kernel_regularizer=regularizers.l2(1e-4),
                                act_func="elu", normalize="Instance", dropout=0.15,
                                strides=1, use_bias=True, kernel_initializer="he_normal",
                                _dilation_rate=0):
    def f(input):
        if kernel_regularizer is None:
            if _dilation_rate == 0:
                conv = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                              padding="same", use_bias=use_bias)(input)
            else:
                conv = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                              padding="same", use_bias=use_bias, dilation_rate=_dilation_rate)(input)
        else:
            if _dilation_rate == 0:
                conv = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                              kernel_initializer=kernel_initializer, padding="same", use_bias=use_bias,
                              kernel_regularizer=kernel_regularizer)(input)
            else:
                conv = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                              kernel_initializer=kernel_initializer, padding="same", use_bias=use_bias,
                              kernel_regularizer=kernel_regularizer, dilation_rate=_dilation_rate)(input)
        if dropout is not None:
            dropout_layer = Dropout(dropout)(conv)
        else:
            dropout_layer = conv
        if normalize is None:
            norm_layer = dropout_layer
        else:
            norm_layer = InstanceNormalization()(dropout_layer)  # <- this is the line that fails
            # norm_layer = BatchNormalization()(dropout_layer)
        return Activation(act_func)(norm_layer)
    return f

There is no InstanceNormalization() layer in stock Keras. (That doesn't mean you can't apply instance normalization.)
Keras does have a tf.keras.layers.BatchNormalization layer, which can be configured to apply several kinds of normalization.
This layer has the following parameters:
tf.keras.layers.BatchNormalization(
    axis=-1,
    momentum=0.99,
    epsilon=0.001,
    center=True,
    scale=True,
    beta_initializer="zeros",
    gamma_initializer="ones",
    moving_mean_initializer="zeros",
    moving_variance_initializer="ones",
    beta_regularizer=None,
    gamma_regularizer=None,
    beta_constraint=None,
    gamma_constraint=None,
    **kwargs
)
By changing the axis parameter you can turn this into an instance normalization layer, or other kinds of normalization.
Both batch normalization and instance normalization apply y = gamma * (x - mean) / sqrt(var + epsilon) + beta; they differ only in the axes over which mean and var are computed.
Now assume a channels-first layout, i.e. [B, C, H, W]. For batch normalization, pass the channel axis as the axis argument of the BatchNormalization layer; it will then compute C means and standard deviations:
BatchNormalisation layer: tf.keras.layers.BatchNormalization(axis=1)
For instance normalization, set axis to both the batch and channel axes; it will then compute B*C means and standard deviations:
InstanceNormalisation layer: tf.keras.layers.BatchNormalization(axis=[0,1])
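A minimal sketch of how this could look (shapes and layer choices are illustrative; a fixed batch size is assumed here, since axis=[0,1] makes the layer keep per-sample statistics and parameters):
import tensorflow as tf

# channels-first input [B, C, H, W], with an illustrative fixed batch size of 8
inp = tf.keras.Input(batch_size=8, shape=(16, 32, 32))
x = tf.keras.layers.Conv2D(16, 3, padding="same", data_format="channels_first")(inp)
# Keeping the batch and channel axes makes BatchNormalization compute
# B*C means/variances, i.e. instance normalization; training=True forces
# it to use the current batch statistics rather than moving averages.
x = tf.keras.layers.BatchNormalization(axis=[0, 1])(x, training=True)
x = tf.keras.layers.Activation("elu")(x)
model = tf.keras.Model(inp, x)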
Update 1
When using BatchNormalization this way, you must call the layer with training=True if you want it to act as instance normalization; otherwise it falls back to the accumulated moving statistics.
Update 2
You can also use the built-in InstanceNormalization layer from TensorFlow Addons, documented here:
https://www.tensorflow.org/addons/api_docs/python/tfa/layers/InstanceNormalization
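For reference, a minimal usage sketch based on the Addons API (requires the tensorflow-addons package; the input shape is a placeholder):
import tensorflow as tf
import tensorflow_addons as tfa

inp = tf.keras.Input(shape=(32, 32, 16))  # channels-last, C=16
x = tfa.layers.InstanceNormalization(
    axis=3,  # the channel axis
    center=True, scale=True,
    beta_initializer="random_uniform",
    gamma_initializer="random_uniform")(inp)
model = tf.keras.Model(inp, x)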

Related

Masking zeroes in LSTM

My data set consists of satellite observations that include a lot of zeroes, which strongly affect my final simulation results.
I have two sets of input data: dynamic ones (X_dynamic_LSTM.shape = (95931, 1, 5)) that change through the time series, and static ones (X_static_MLP.shape = (95931, 10)) that do not change. For the dynamic ones I use an LSTM and for the static ones an MLP; I concatenate the two and get the final result from another MLP.
Can you suggest how I should ignore these zero values in my prediction dataframe? I know about Masking and Embedding but don't know how to add them to my code!
from tensorflow.keras.layers import Input, LSTM, Dense, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Masking
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
lstm_input = Input(shape=(X_dynamic_LSTM.shape[1], X_dynamic_LSTM.shape[2]))
x = Masking(mask_value=0.)(lstm_input)
x = LSTM(70, activation='tanh', return_sequences=True)(x)
x = Dropout(0.3)(x)
x = LSTM(35)(x)
x = Dropout(0.3)(x)
x = Dense(1, activation='tanh')(x)
#mlp input with additional 3 variables at t=t
mlp_input = Input(shape=(X_static_MLP.shape[1],))  # note the trailing comma: shape must be a tuple
mlp = Dense(30, activation='relu')(mlp_input)
mlp = Dense(20, activation='relu')(mlp)
merge = Concatenate()([x, mlp])
hidden1 = Dense(5, activation='relu')(merge)
mlp_out = Dense(1, activation='relu')(hidden1)
model = Model(inputs=[lstm_input, mlp_input],outputs=mlp_out)
#compile the model
model.compile(loss='mae', optimizer='adam')
#fit the model
model.fit([X_dynamic_LSTM, X_static_MLP], y_train, batch_size=40,
          epochs=10, validation_split=0.2)
Use an Embedding layer as your first layer; see the tf.keras.layers.Embedding documentation:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Embedding(input_dim=1000, output_dim=64))
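A minimal sketch, assuming integer-encoded inputs (input_dim and output_dim below are placeholders); mask_zero=True treats index 0 as padding and propagates a mask so that downstream layers such as LSTM skip those steps:
import tensorflow as tf

model = tf.keras.Sequential([
    # input_dim/output_dim are illustrative; mask_zero=True masks index 0
    tf.keras.layers.Embedding(input_dim=1000, output_dim=64, mask_zero=True),
    tf.keras.layers.LSTM(70, activation='tanh'),
    tf.keras.layers.Dense(1),
])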

build two Sequential lstm networks

I have the following model. I want to build a second, identical sequential network and finally concatenate the outputs of the two networks. Here is my model:
import numpy as np
import tensorflow as tf
from keras.models import Sequential, Model,load_model
from keras.layers import Dense, Dropout, Activation, Flatten, LSTM, Embedding, Input, concatenate, Lambda
from keras.utils import np_utils
from sklearn.metrics import mean_squared_error
#from keras.utils.vis_utils import plot_model
import keras
from keras_self_attention import SeqSelfAttention, SeqWeightedAttention
X1 = np.random.normal(size=(100,1,2))
X2 = np.random.normal(size=(100,1,2))
X3 = np.random.normal(size=(100,1,2))
Y = np.random.normal(size=(100,18))
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(X1.shape[1],X1.shape[2])))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=18))
model.compile(optimizer = 'adam', loss = 'mean_squared_error',metrics = ['MAE'])
model.fit(X1, Y, epochs =1, batch_size = 100)
Here is the model. I want to add the second branch (shown in red in my diagram) to the model. Can anybody help me? Thanks
It is better to use the Functional API to handle multiple inputs:
from keras import models
from keras.layers import Input, Dense, LSTM, Concatenate

def sub_model(inp):
    mod = LSTM(50, return_sequences=True)(inp)
    mod = LSTM(50, dropout=0.2)(mod)
    return Dense(18)(mod)

inp1 = Input(shape=(1, 2))
inp2 = Input(shape=(1, 2))
mod1 = sub_model(inp1)
mod2 = sub_model(inp2)
concat = Concatenate()([mod1, mod2])
output = Dense(18)(concat)
model = models.Model([inp1, inp2], output)
which gives you a model with two input branches whose outputs are concatenated.
To train it, compile the model and call model.fit() with both inputs:
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['MAE'])
model.fit([X1, X2], Y)

Keras-TF learnable division/arbitrary operation layer

I am searching for a layer that performs an element-wise division of its input, where the parameters of this division are learned, just like those of a standard Conv2D layer.
I found this:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Multiply
but I don't think it's what I want, because I want the division parameters to be learned, not to divide two layers by each other.
A Dense layer computes dot products, which is NOT what I want. I am looking for ELEMENT-WISE multiplication/division.
Sample code for a custom layer that performs an element-wise division of the input, with the parameters (weights) of the division learned during training, is shown below:
%tensorflow_version 2.x
from tensorflow import keras
from tensorflow.keras import Input
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.models import Model, Sequential
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer
import numpy as np

class MyLayer(Layer):
    def __init__(self, output_dims, **kwargs):
        self.output_dims = output_dims
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=self.output_dims,
                                      initializer='ones',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this somewhere!

    def call(self, x):
        # Dividing input element-wise by the learned weights
        return tf.divide(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return self.output_dims

mInput = np.array([[1, 2, 3, 4]])
inShape = (4,)
outShape = (4,)
net = Sequential()
l1 = MyLayer(outShape, input_shape=inShape)
net.add(l1)
net.compile(loss='mean_absolute_error', optimizer='adam', metrics=['accuracy'])
p = net.predict(x=mInput, batch_size=1)
print(p)
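To see the division parameters actually being learned, one could fit the layer on targets generated with a known divisor (the divisor below is arbitrary):
# Targets produced by dividing the inputs by a fixed, known divisor
X = np.random.rand(1000, 4).astype('float32') + 0.5
true_div = np.array([1.0, 2.0, 3.0, 4.0], dtype='float32')
Y = X / true_div

net.fit(X, Y, epochs=200, verbose=0)
print(l1.get_weights()[0])  # should drift toward [1. 2. 3. 4.]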
Hope this helps. Happy Learning!

Reproduce same results on each run - Keras, Google Colab

I run the following code in Google Colab (with GPU):
import random
random.seed(1)
import numpy as np
from numpy.random import seed
seed(1)
import tensorflow as tf  # tf itself is used below (ConfigProto, Session, expand_dims)
from tensorflow import set_random_seed
set_random_seed(2)
import pandas as pd
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Flatten, Dense, Lambda, SimpleRNN
from keras.optimizers import *
from keras.utils import np_utils
from keras.initializers import *
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, auc, precision_recall_curve
from sklearn.metrics import confusion_matrix
from keras.callbacks import EarlyStopping
from keras.models import Sequential  # needed by make_model below
from keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
##Loading dataset train and validation files, the files are same for every run
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
print("***********************************************************************************************")
def make_model():
    model = Sequential()
    model.add(Conv2D(10, (5,5), kernel_initializer=glorot_uniform(seed=1), input_shape=(22,10,1),
                     use_bias=True, activation="relu", strides=1, padding="valid"))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Flatten())
    model.add(Dense(20, kernel_initializer=glorot_uniform(seed=1), activation="relu"))
    model.add(Lambda(lambda x: tf.expand_dims(x, axis=1)))
    model.add(SimpleRNN(20, kernel_initializer=glorot_uniform(seed=1), activation="relu", return_sequences=False))
    model.add(Dense(1, kernel_initializer=glorot_uniform(seed=1), activation="sigmoid"))
    opti = SGD(lr=0.01)
    model.compile(loss="binary_crossentropy", optimizer=opti, metrics=["accuracy"])
    return model

model = make_model()
model.fit(x_train, y_train, validation_data=(x_validation, y_validation),
          epochs=50, batch_size=20, verbose=2, callbacks=[es])
Despite setting all the seed values, the model's prediction results differ on subsequent runs. The training and testing of the model happen in the same Colab cell.
You are dealing with floating-point numbers that are multiplied and added on different GPU threads, so the same operations can happen in a different order from run to run. Floating-point addition and multiplication are not associative, so a different order can produce a slightly different result. See What Every Computer Scientist Should Know About Floating-Point Arithmetic.
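A quick illustration of that non-associativity in plain Python, independent of TensorFlow:
a, b, c = 0.1, 0.2, 0.3
print((a + b) + c == a + (b + c))  # False
print((a + b) + c, a + (b + c))    # 0.6000000000000001 0.6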

Eager Mode: Using Sequentials within tf.keras.Model

I am transitioning from PyTorch to TensorFlow 1.12 and would like to know whether it is possible to define tf.keras.Sequential classes within a tf.keras.Model and run those in eager mode.
I constructed this minimal non-working example and would be grateful if someone could advise where I am going wrong. I have also used tf.contrib.eager.Network classes (with more success); however, since they are scheduled for deprecation, I tried to avoid them.
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from keras.models import Sequential
from keras.layers import Dense, Activation
from tensorflow.train import AdamOptimizer
tf.enable_eager_execution()
class MLP(tf.keras.Model):
    def __init__(self, in_dim, out_dim, hidden_dim, num_layers, activation):
        super(MLP, self).__init__()
        model = Sequential()
        in_features = in_dim
        for layer in range(num_layers):
            model.add(Dense(hidden_dim,))
            model.add(Activation(activation))
            in_features = hidden_dim
        model.add(Dense(out_dim, input_shape=(hidden_dim,)))
        self.model = model

    def call(self, inputs):
        return self.model(inputs)

model = MLP(10, 1, 20, 4, 'relu')
optim = AdamOptimizer(learning_rate=1e-4)

for v in model.variables:
    print(v)

z = tf.convert_to_tensor(np.random.randn(100, 10), dtype=tf.float32)
with tfe.GradientTape() as tape:
    tape.watch(z)
    u = model(z)
    loss = tf.reduce_mean(tf.abs(u))
grad = tape.gradient(loss, model.trainable_variables)
optim.apply_gradients(zip(grad, model.trainable_variables))
print(loss.numpy())
Use
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
instead of:
from keras.models import Sequential
from keras.layers import Dense, Activation
The standalone keras package and tf.keras are separate implementations, and standalone Keras layers do not support eager execution, so they cannot be nested inside a tf.keras.Model here.
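A minimal sketch of the corrected setup under TF 1.x with eager execution (same structure as the question's model, trimmed for brevity):
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation

tf.enable_eager_execution()  # TF 1.x only; eager is the default in TF 2.x

class MLP(tf.keras.Model):
    def __init__(self, out_dim, hidden_dim, num_layers, activation):
        super(MLP, self).__init__()
        self.model = Sequential()
        for _ in range(num_layers):
            self.model.add(Dense(hidden_dim))
            self.model.add(Activation(activation))
        self.model.add(Dense(out_dim))

    def call(self, inputs):
        return self.model(inputs)

model = MLP(1, 20, 4, 'relu')
z = tf.convert_to_tensor(np.random.randn(100, 10), dtype=tf.float32)
print(model(z).shape)  # (100, 1)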