For pedagogic purposes, I would like to implement a custom dense ReLU layer (without bias) using TensorFlow. The idea is to see the backpropagation explicitly, both for the inputs and for the synaptic weights, so the custom gradient is not attached to an activation function but to the layer's call function. Below is a first attempt on a regression problem, which I would like verified/confirmed. In this problem I have n_output output neurons and I split a random dataset into n_output classes. For a given input of class j (class_j), output_neuron_i should respond with the value 1 - abs(output_neuron_i - class_j)/n_output (for example, with n_output = 100, output neuron 5 should respond to a class-3 input with 1 - 2/100 = 0.98). I compared the standard ReLU with my custom ReLU and both show the same behavior, so I am fairly confident the formulas are correct, but not entirely sure.
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
class CustomReLU(tf.keras.layers.Layer):
    def __init__(self, units):
        super(CustomReLU, self).__init__()
        self.units = units
        self.W = None
    def build(self, input_shape):
        self.W = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer=kernel_initializer,
            trainable=True,
        )
    @tf.custom_gradient
    def call(self, inputs): #shape=[Batch, n_X]
        self.S = tf.matmul(inputs, self.W) #shape=[Batch, n_Y]
        self.Y = tf.nn.relu(self.S) #shape=[Batch, n_Y]
        #print("self.Y.shape", self.Y.shape)
        def custom_grad(dE_dY, variables):
            dY_dS = tf.where(self.S > 0, tf.ones_like(self.S), tf.zeros_like(self.S)) #shape=[Batch, n_Y]
            dE_dS = dE_dY*dY_dS #shape=[Batch, n_Y]
            dS_dX = tf.transpose(self.W) #shape=[n_Y, n_X]
            dE_dX = tf.matmul(dE_dS, dS_dX) #shape=[Batch, n_X]
            modif_dY_dS = tf.expand_dims(dY_dS, [1]) #shape=[Batch, 1, n_Y]
            modif_inputs = tf.expand_dims(inputs, [2]) #shape=[Batch, n_X, 1]
            modif_dY_dW = modif_dY_dS*modif_inputs #shape=[Batch, n_X, n_Y]
            modif_dE_dY = tf.expand_dims(dE_dY, [1]) #shape=[Batch, 1, n_Y]
            dE_dW = modif_dE_dY*modif_dY_dW #shape=[Batch, n_X, n_Y]
            dE_dW = tf.reduce_sum(dE_dW, 0, keepdims=False) #shape=[n_X, n_Y]
            return dE_dX, [dE_dW] #shape=[Batch, n_X], shape=[n_X, n_Y]
        return self.Y, custom_grad
def create_layer(units, layer_type="custom"):
    if layer_type=="custom":
        return CustomReLU(units)
    else:
        return tf.keras.layers.Dense(units, activation="relu", use_bias=False, kernel_initializer=kernel_initializer)
nb_hidden_layer = 1
n_input = 100
n_output = 100
size_dataset = n_output*10
nb_hidden_unit = 100
mean_input =0.1
var_input = 1.0
learning_rate = 0.1
kernel_initializer = "glorot_normal"
nb_epoch = 10000
input_vals = tf.random.normal( [size_dataset, n_input], mean=mean_input, stddev=tf.math.sqrt(var_input), dtype=tf.dtypes.float32)
target_vals = np.zeros( shape=(size_dataset, n_output), dtype='float32')
nb_sample_per_class = int(size_dataset/n_output)
for id_output_neuron in range(n_output):
    for id_class in range(n_output):
        diff_neuron_class = np.abs(id_output_neuron-id_class)
        target_vals[id_class*nb_sample_per_class:(id_class+1)*nb_sample_per_class, id_output_neuron] = 1.-diff_neuron_class*(1./n_output)
#print("target_vals", target_vals)
target_vals = tf.convert_to_tensor(target_vals)
model = tf.keras.Sequential()
model.add(tf.keras.Input(shape=(n_input,)))
layer_type="custom"
for id_hidden_layer in range(nb_hidden_layer):
    model.add(create_layer(nb_hidden_unit, layer_type=layer_type))
model.add(create_layer(n_output, layer_type=layer_type))
loss_obj = tf.keras.losses.MeanSquaredError()
opt_obj = tf.keras.optimizers.SGD(learning_rate)
model.compile(loss=loss_obj, optimizer=opt_obj, run_eagerly = True)
l_loss = []
epoch_range = np.arange(0, nb_epoch)
for i in epoch_range:
    print(i, "/", nb_epoch)
    outputs = model(input_vals)
    loss = loss_obj(target_vals, outputs)
    l_loss.append(loss)
    model.fit(input_vals, target_vals, epochs=1, batch_size=size_dataset, shuffle=True)
plt.plot(epoch_range, l_loss, label="loss")
plt.ylim(bottom=0)
plt.legend()
plt.grid()
plt.show()
The part "modif..." that computes the gradient of the synaptic weights dE_dW in the custom_grad function does not satisfy me. I think it is inelegant and potentially inefficient. Is there someone who can provide a better solution ?
I am trying to implement a custom variational autoencoder. Following is the code to reproduce.
epsilon_std = 1.0
vx = tf.keras.layers.Input(batch_shape=(None, max_length_output), name='vae_enc_in')
vx_emb = tf.keras.layers.Embedding(
vocab_tar_size,
embedding_dim,
input_length=max_length_output,
name='vae_enc_emb'
)(vx)
vxbi = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(units, return_sequences=False, recurrent_dropout=0.2, name='vae_enc_lstm'), merge_mode='concat'
)(vx_emb)
vx_drop = tf.keras.layers.Dropout(0.2, name='vae_enc_drop')(vxbi)
vx_dense = tf.keras.layers.Dense(units, activation='linear', name='vae_enc_dense')(vx_drop)
vx_elu = tf.keras.layers.ELU(name='vae_enc_elu')(vx_dense)
vx_drop1 = tf.keras.layers.Dropout(0.2, name='vae_enc_drop2')(vx_elu)
z_mean = tf.keras.layers.Dense(20, name='vae_enc_dense2')(vx_drop1)
z_log_var = tf.keras.layers.Dense(20, name='vae_enc_dense3')(vx_drop1)
def sampling(args):
    z_mean, z_log_var = args
    epsilon = tf.random.normal(shape=(BATCH_SIZE, 20), mean=0.,
                               stddev=epsilon_std)
    return z_mean + tf.math.exp(z_log_var / 2) * epsilon
z = tf.keras.layers.Lambda(sampling, output_shape=(20,), name='vae_lambda')([z_mean, z_log_var])
repeated_context = tf.keras.layers.RepeatVector(max_length_output, name='vae_repeat')
decoder_h = tf.keras.layers.LSTM(units, return_sequences=True, recurrent_dropout=0.2, name='vae_dec_lstm')
decoder_mean = tf.keras.layers.TimeDistributed(
tf.keras.layers.Dense(vocab_tar_size, activation='linear', name='vae_dec_lstm'),
name='vae_dec_time_dist'
)
h_decoded = decoder_h(repeated_context(z))
x_decoded_mean = decoder_mean(h_decoded)
def zero_loss(y_true, y_pred):
    print("ZERO LOSS")
    return tf.zeros_like(y_pred)
And then I create a custom VAE layer (this uses tfa.seq2seq.sequence_loss, so tensorflow_addons is assumed imported as tfa):
class VAELayer(tf.keras.layers.Layer):
def __init__(self, batch_size, max_len, **kwargs):
self.is_placeholder = True
super(VAELayer, self).__init__(**kwargs)
self.target_weights = tf.constant(np.ones((batch_size, max_len)), tf.float32)
def vae_loss(self, x, x_decoded_mean):
#xent_loss = K.sum(metrics.categorical_crossentropy(x, x_decoded_mean), axis=-1)
labels = tf.cast(x, tf.int32)
xent_loss = tf.math.reduce_sum(
tfa.seq2seq.sequence_loss(
x_decoded_mean,
labels,
weights=self.target_weights,
average_across_timesteps=False,
average_across_batch=False
),
axis=-1
)
#softmax_loss_function=softmax_loss_f), axis=-1)#, for sampled softmax
kl_loss = - 0.5 * tf.math.reduce_sum(1 + z_log_var - tf.math.square(z_mean) - tf.math.exp(z_log_var), axis=-1)
return tf.math.reduce_mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean = inputs[1]
print(x.shape, x_decoded_mean.shape)
loss = self.vae_loss(x, x_decoded_mean)
print("Adding loss")
self.add_loss(loss, inputs=inputs)
print("Returning ones like")
return tf.ones_like(x)
I compiled it successfully and also produced a test output by calling the model, but when I try to train it, it produces the error:
TypeError: Tensors are unhashable. (KerasTensor(type_spec=TensorSpec(shape=(), dtype=tf.float32, name=None), name='tf.math.reduce_sum_25/Sum:0', description="created by layer 'tf.math.reduce_sum_25'"))Instead, use tensor.ref() as the key.
Following is the code for compiling and fitting the model
loss_layer = VAELayer(BATCH_SIZE, max_length_output)([vx, x_decoded_mean])
vae = tf.keras.Model(vx, [loss_layer], name='VariationalAutoEncoderLayer')
opt = tf.keras.optimizers.Adam(lr=0.01) #SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
vae.compile(optimizer=opt, loss=[zero_loss])
def vae_sentence_generator():
    for ip, tg in train_dataset:
        yield tg.numpy()
vae.fit(vae_sentence_generator(steps_per_epoch=steps_per_epoch, epochs=10))
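Separately from the error above, note that the keyword arguments in this last line are passed to the generator rather than to fit; presumably (an assumption about the intent) the call was meant to be:

vae.fit(vae_sentence_generator(), steps_per_epoch=steps_per_epoch, epochs=10)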
I was fine-tuning BERT and ran into OOM issues. I heard a good way to handle this is gradient accumulation. Below is my optimization.py (including the gradient accumulation):
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.training import optimizer
from tensorflow.python.framework import ops
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = MultistepAdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999, # 0.98 ONLY USED FOR PRETRAIN. MUST CHANGE AT FINE-TUNING 0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
# Normally the global step update is done inside of `apply_gradients`.
# However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
# a different optimizer, you should probably take this line out.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class MultistepAdamWeightDecayOptimizer(optimizer.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
n = 1,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="MultistepAdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(MultistepAdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self._n = n
self.exclude_from_weight_decay = exclude_from_weight_decay
self._n_t = None
def _prepare(self):
super(MultistepAdamWeightDecayOptimizer, self)._prepare()
self._n_t=tf.convert_to_tensor(self._n, name="n")
def _create_slots(self,var_list):
super(MultistepAdamWeightDecayOptimizer, self)._create_slots(var_list)
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1,
name="iter",
colocate_with=first_var)
for v in var_list:
self._zeros_slot(v,"grad_acc",self._name)
def _get_iter_variable(self):
if tf.contrib.eager.in_eager_mode():
graph = None
else:
graph = tf.get_default_graph()
return self._get_non_slot_variable("iter", graph=graph)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
update_ops = []
var_list = [v for g, v in grads_and_vars if g is not None]
with ops.init_scope():
self._create_slots(var_list)
self._prepare()
for(grad, param) in grads_and_vars:
if grad is None or param is None:
continue
grad_acc = self.get_slot(param, "grad_acc")
param_name = self._get_variable_name(param.name)
m = tf.get_variable(name=param_name + "/adam_m", shape=param.shape.as_list(),
dtype=tf.float32,trainable=False, initializer=tf.zeros_initializer())
v = tf.get_variable(name=param_name + "/adam_v", shape=param.shape.as_list(),
dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
def _apply_adam(grad_acc, grad, param, m, v):
total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, total_grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(total_grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr =self.learning_rate * update
next_param = param - update_with_lr
adam_op = tf.group(param.assign(next_param), m.assign(next_m),
v.assign(next_v))
with tf.control_dependencies([adam_op]):
grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc), use_locking=self._use_locking)
return tf.group(adam_op, grad_acc_to_zero_op)
def _accumulate_gradient(grad_acc, grad):
assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
return tf.group(assign_op)
update_op = tf.cond(tf.equal(self._get_iter_variable(),0),
lambda: _apply_adam(grad_acc, grad, param,m, v),
lambda: _accumulate_gradient(grad_acc, grad))
update_ops.append(update_op)
apply_updates = self._finish(update_ops, name_scope=name)
return apply_updates
def _finish(self, update_ops, name_scope):
iter_=self._get_iter_variable()
with tf.control_dependencies(update_ops):
with tf.colocate_with(iter_):
update_iter = iter_.assign(tf.mod(iter_+1, self._n_t),
use_locking=self._use_locking)
return tf.group(
*update_ops + [update_iter], name=name_scope)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
After I used this optimization.py, I could use a larger batch size. But the loss did not decrease, and after 300 steps (I have 550000 training examples, batch size 64, 1000 iterations and 20 epochs) it said: train loop marked as finished and stopped.
I am not sure what the problem is, could you please help me out? Thanks.
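For reference, the gradient-accumulation idea itself is independent of this TF1-style optimizer. Below is a minimal sketch in TF2 eager style; the tiny model, optimizer, loss and the accumulation count n_accum are placeholders chosen for illustration, not taken from the code above:

import tensorflow as tf

# Toy stand-ins (assumptions, not the BERT model from the question).
model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
model.build(input_shape=(None, 4))
optimizer = tf.keras.optimizers.Adam(1e-3)
loss_fn = tf.keras.losses.MeanSquaredError()

n_accum = 4  # apply one optimizer step every n_accum micro-batches
accum_grads = [tf.Variable(tf.zeros_like(v), trainable=False)
               for v in model.trainable_variables]

def train_step(x, y, step):
    with tf.GradientTape() as tape:
        # divide so the applied update is the mean over the accumulated micro-batches
        loss = loss_fn(y, model(x, training=True)) / n_accum
    grads = tape.gradient(loss, model.trainable_variables)
    for acc, g in zip(accum_grads, grads):
        acc.assign_add(g)
    if (step + 1) % n_accum == 0:
        optimizer.apply_gradients(zip(accum_grads, model.trainable_variables))
        for acc in accum_grads:
            acc.assign(tf.zeros_like(acc))
    return loss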
I implemented a neural network model with TensorFlow (version 2.0) on Python 3.
I don't know whether the code works properly, because the loss value hardly changes.
Is the code wrong, or does the model simply have too many parameters (which would mean the code is right)?
Please tell me whether the code works properly.
The following is the code.
import tensorflow as tf
import numpy as np
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class Model(object):
def __init__(self):
self.var_list = []
self.w_layer1 = tf.Variable(tf.random.normal(shape=[28*28, 1000], stddev=0.3,dtype=tf.float64))
self.b_layer1 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1,dtype=tf.float64))
self.w_layer2 = tf.Variable(tf.random.normal(shape=[1000, 100], stddev=0.3,dtype=tf.float64))
self.b_layer2 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1,dtype=tf.float64))
self.w_layer3 = tf.Variable(tf.random.normal(shape=[100, 100], stddev=0.3,dtype=tf.float64))
self.b_layer3 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1,dtype=tf.float64))
self.w_layer4 = tf.Variable(tf.random.normal(shape=[100, 10], stddev=0.3,dtype=tf.float64))
self.b_layer4 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1,dtype=tf.float64))
self.var_list.append(self.w_layer1)
self.var_list.append(self.b_layer1)
self.var_list.append(self.w_layer2)
self.var_list.append(self.b_layer2)
self.var_list.append(self.w_layer3)
self.var_list.append(self.b_layer3)
self.var_list.append(self.w_layer4)
self.var_list.append(self.b_layer4)
def __call__(self, x):
return self.w*x+self.b
def dense_layer(self, inputs, w, b):
z = tf.matmul(inputs, w) + b
return tf.nn.relu(z)
def output_layer(self, inputs, w, b):
return tf.matmul(inputs, w) + b
def flattend(self, inputs):
inputs = tf.cast(inputs, tf.float64)
return tf.reshape(inputs, [-1, 28*28])
def loss(self, outputs, targets):
predicted_y = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = outputs, labels = targets))
return predicted_y
def grad(self, x, target_y):
with tf.GradientTape() as tape:
tape.watch(self.var_list)
loss_value = self.loss(self.run(x), target_y)
return tape.gradient(loss_value, self.var_list)
def run(self, inputs):
inputs = self.flattend(inputs)
layer1 = self.dense_layer(inputs, self.w_layer1, self.b_layer1)
layer2 = self.dense_layer(layer1, self.w_layer2, self.b_layer2)
layer3 = self.dense_layer(layer2, self.w_layer3, self.b_layer3)
layer4 = self.output_layer(layer3, self.w_layer4, self.b_layer4)
return layer4
def optimizer(self):
opt = tf.keras.optimizers.SGD(learning_rate=0.01)
return opt
def make_onehot_labels(labels):
    depth = 10
    one_hot_labels = tf.one_hot(labels, depth)
    return one_hot_labels
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images/255.0
test_images = test_images/255.0
train_labels = make_onehot_labels(train_labels)
test_labels = make_onehot_labels(test_labels)
ds_train_x = tf.data.Dataset.from_tensor_slices(train_images)
ds_train_y = tf.data.Dataset.from_tensor_slices(train_labels)
train_dataset = tf.data.Dataset.zip((ds_train_x, ds_train_y)).shuffle(1000).repeat().batch(300)
train_images = tf.convert_to_tensor(train_images)
train_labels = tf.convert_to_tensor(train_labels)
test_images = tf.convert_to_tensor(test_images)
test_labels = tf.convert_to_tensor(test_labels)
count = 1
model = Model()
opt = model.optimizer()
print(model.loss(model.run(train_images), train_labels))
for epoch in range(10):
    for data in train_dataset:
        if count%200==0:
            print(model.loss(model.run(train_images), train_labels))
            #print(grads)
            break
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count+1
The following is the result of running the above code:
tf.Tensor(184.81706096058622, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
The issue is in the following part
for epoch in range(10):
    for data in train_dataset:
        if count%200==0:
            print(model.loss(model.run(train_images), train_labels))
            #print(grads)
            break
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count+1
You have a break inside the if condition, meaning you break out of your training loop (and restart a new epoch) as soon as you hit count%200==0. Remove the break and you'll see the error rate going down.
To elaborate: once you reach count==200 you break the loop, and since the counter is no longer incremented, you never get past that if condition again after 200 iterations, and everything beyond it includes your gradient application.
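Concretely, since the dataset uses .repeat(), the inner loop also needs an explicit step bound once the break is gone. A hedged sketch of the fixed loop (steps_per_epoch = 200 is an arbitrary choice, not from the original code):

steps_per_epoch = 200  # arbitrary bound; the dataset repeats indefinitely
for epoch in range(10):
    for data in train_dataset.take(steps_per_epoch):
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count + 1
        if count % 200 == 0:
            print(model.loss(model.run(train_images), train_labels))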
I have implemented the basic MNIST model with a custom convolution layer as shown below. The problem is that the gradients are always None for the custom layer, so no learning happens during backpropagation.
I have debugged the outputs of the layers during the forward pass and they are OK.
Here is the sample code; for simplicity I have passed an image of ones and just returned the matrix from the custom layer.
I have tried my best but could not make it work; any help is very much appreciated in advance.
The following code is executable and raises the warning:
WARNING:tensorflow:Gradients do not exist for variables ['cnn/custom_conv2d/kernel:0', 'cnn/custom_conv2d/bias:0', 'cnn/custom_conv2d_1/kernel:0', 'cnn/custom_conv2d_1/bias:0', 'cnn/custom_conv2d_2/kernel:0', 'cnn/custom_conv2d_2/bias:0'] when minimizing the loss.
import numpy as np
import tensorflow as tf
from grpc.beta import interfaces
class CustomConv2D(tf.keras.layers.Conv2D):
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
__name__ = 'CustomConv2D',
**kwargs
):
super(CustomConv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs )
def call(self, input):
(unrolled_mat, filters, shape) = self.prepare(input)
#unrolled_mat=unrolled inputs
#filters=unrolled kernels of the lAYER
#convolution through unrolling
conv_result = tf.tensordot(unrolled_mat, filters, axes=1)
result=tf.convert_to_tensor(tf.reshape(conv_result, shape))
return result
def prepare(self, matrix):
batches,rows,cols,channels=matrix.shape
kernel_size = self.kernel_size[0]
unrolled_matrices=None
# start = timer()
for batch in range(batches):
unrolled_maps=None
for chanel in range(channels):
unrolled_map = self.unroll(batch, cols, kernel_size, matrix, rows,chanel)
if unrolled_maps is None:
unrolled_maps = unrolled_map
else:
unrolled_maps=np.append(unrolled_maps,unrolled_map,axis=1)
unrolled_maps = np.reshape(unrolled_maps,(-1,unrolled_maps.shape[0],unrolled_maps.shape[1]))
if unrolled_matrices is None:
unrolled_matrices = unrolled_maps
else:
unrolled_matrices = np.concatenate((unrolled_matrices, unrolled_maps))
kernels=self.get_weights()
kernels=np.reshape(kernels[0],(unrolled_matrices[0].shape[1],-1))
shp=(batches,rows-(kernel_size-1),cols-(kernel_size-1),self.filters)
matrix=unrolled_matrices
return (matrix, kernels, shp)
def unroll(self, batch, cols, kernel_size, matrix, rows, chanel):
# a=np.zeros((shape))
unrolled_feature_map = None
for x in range(0, rows - (kernel_size - 1)):
for y in range(0, (cols - (kernel_size - 1))):
temp_row = None # flattened kernal at single position
for k in range(kernel_size):
for l in range(kernel_size):
if temp_row is None:
temp_row = matrix[batch, x + k, y + l, chanel]
# print(matrix[batch, x + k, y + l])
else:
temp_row = np.append(temp_row, matrix[batch, x + k, y + l, chanel])
# print(matrix[batch, x + k, y + l])
if unrolled_feature_map is None:
unrolled_feature_map = np.reshape(temp_row,
(-1, kernel_size * kernel_size)) # first row of unrolled matrix added
else:
unrolled_feature_map = np.concatenate((unrolled_feature_map, np.reshape(temp_row,
(-1, kernel_size * kernel_size)))) # concatinate subsequent row to un_mat
unrolled_feature_map = np.reshape(unrolled_feature_map,( unrolled_feature_map.shape[0], unrolled_feature_map.shape[1]))
# print(unrolled_feature_map.shape)
matrix=unrolled_feature_map
return matrix
class CNN(tf.keras.Model):
def __init__(self):
super(CNN, self).__init__()
self.learning_rate = 0.001
self.momentum = 0.9
self.optimizer = tf.keras.optimizers.Adam(self.learning_rate, self.momentum)
self.conv1 = CustomConv2D(filters = 6, kernel_size= 3, activation = 'relu') ## valid means no padding
self.pool1 = tf.keras.layers.MaxPool2D(pool_size=2) # default stride??-
self.conv2 = CustomConv2D(filters = 16, kernel_size = 3, activation = 'relu')
self.pool2 = tf.keras.layers.MaxPool2D(pool_size = 2)
self.conv3 = CustomConv2D(filters=120, kernel_size=3, activation='relu')
self.flatten = tf.keras.layers.Flatten()
self.fc1 = tf.keras.layers.Dense(units=82,kernel_initializer='glorot_uniform')
self.fc2 = tf.keras.layers.Dense(units=10, activation = 'softmax',kernel_initializer='glorot_uniform')
def call(self, x):
x = self.conv1(x) # shap(32,26,26,6) all (6s 3s 6s 3s)
x = self.pool1(x) # shap(32,13,13,6) all (6s)
x = self.conv2(x) # shap(32,11,11,16) all(324s)
x = self.pool2(x) # shap(32,5,5,16)
x = self.conv3(x) # shap(32,3,3,120)all(46656)
x = self.flatten(x) # shap(32,1080)
x = self.fc1(x) # shap(32,82)
x = self.fc2(x) # shap(32,10)
return x
def feedForward(self, image, label):
accuracy_object = tf.metrics.Accuracy()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
with tf.GradientTape() as tape:
feedForwardCompuation = self(image, training=True)
self.loss_value = loss_object(label, feedForwardCompuation)
grads = tape.gradient(self.loss_value, self.variables)
self.optimizer.apply_gradients(zip(grads, self.variables))
accuracy = accuracy_object(tf.argmax(feedForwardCompuation, axis=1, output_type=tf.int32), label)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train=x_train.astype('float32')
y_train = y_train.astype('float32')
image=x_train[0].reshape((1,28,28,1))
label=y_train[0]
cnn=CNN()
cnn.feedForward(image,label)
UPDATE: I am not using the built-in TF conv function; rather I am implementing my own custom convolution operation via the matrix unrolling method (unrolled map * unrolled filters). But tape.gradient returns None for the custom layers, whereas when I use the built-in conv2d function of TF it works fine!
I have added the actual code of the operation.
The problem is that the convolution operation is not happening in the CustomConv2D class: neither the call method nor the customConv method performs a convolution, they just return the input as it is.
Replacing the line return self.customConv(matrix) in the call method of the CustomConv2D class with return super(tf.keras.layers.Conv2D, self).call(matrix) will perform the actual convolution operation.
One more change is to invoke the call method of the CNN class by adding the line _ = cnn(X_reshaped) before the line cnn.feedForward(image,label).
With these two changes, gradients will be computed.
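In code, the two suggested changes amount to something like this (a sketch; X_reshaped is not defined in the question, so it is built here from x_train as an assumption):

# 1) In CustomConv2D, defer to Keras's own (differentiable) convolution:
    def call(self, input):
        return super(tf.keras.layers.Conv2D, self).call(input)

# 2) Build the model once so the layer weights exist, then run the manual step:
X_reshaped = x_train[:1].reshape((1, 28, 28, 1))
_ = cnn(X_reshaped)
cnn.feedForward(image, label)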
I am building a Keras Custom layer with some Tensorflow support. Before that I wanted to test whether a Convolution2D layer works properly if I write a Keras layer with Tensorflow's conv2d in the call function.
class Convolutional2D(Layer):
def __init__(self, filters=None, kernel_size=None, padding='same', activation='linear', strides=(1,1), name ='Conv2D', **kwargs):
self.filters = filters
self.kernel_size = kernel_size
self.padding = padding
self.activation = activation
self.strides = strides
self.name = name
self.input_spec = [InputSpec(ndim=4)]
super(Convolutional2D, self).__init__(**kwargs)
def call(self, input):
out = tf.layers.conv2d(inputs=input, filters=self.filters, kernel_size=self.kernel_size, strides=self.strides, padding=self.padding,
data_format='channels_last')
return(out)
def compute_output_shape(self, input_shape):
batch_size = input_shape[0]
width = input_shape[1]/self.strides[0]
height = input_shape[2]/self.strides[1]
channels = self.filters
return(batch_size, width, height, channels)
def get_config(self):
config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'padding': self.padding, 'activation':self.activation, 'strides':self.strides,
'name':self.name}
base_config = super(Convolutional2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
This compiles properly, but when I use model.summary() it does not calculate the number of parameters for this layer.
What do I have to do so that the total number of parameters of the model includes the trainable parameters of this layer?
I have found the answer to this problem.
def build(self, input_shape):
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    self.kernel = self.add_weight(shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=self.rank + 2,
                                axes={channel_axis: input_dim})
    self.built = True
The add_weight calls are what define the number of parameters, which I had not done in my code. Not doing so does not hamper the performance of the model; it works fine, except that you cannot get the parameter count for the layer.
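For completeness, here is a minimal TF2-style sketch that both registers the parameters (so model.summary() counts them) and actually uses them in call; the class name and defaults are illustrative, not taken from the answer above:

import tensorflow as tf

class MinimalConv2D(tf.keras.layers.Layer):
    def __init__(self, filters, kernel_size, strides=(1, 1), padding='SAME', **kwargs):
        super(MinimalConv2D, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding

    def build(self, input_shape):
        input_dim = int(input_shape[-1])
        # add_weight is what makes the parameters show up in model.summary()
        self.kernel = self.add_weight(
            name='kernel',
            shape=(self.kernel_size, self.kernel_size, input_dim, self.filters),
            initializer='glorot_uniform',
            trainable=True)
        self.bias = self.add_weight(
            name='bias', shape=(self.filters,),
            initializer='zeros', trainable=True)
        super(MinimalConv2D, self).build(input_shape)

    def call(self, inputs):
        # the kernel created in build is consumed here, so it is counted and trained
        out = tf.nn.conv2d(inputs, self.kernel,
                           strides=(1,) + self.strides + (1,),
                           padding=self.padding)
        return tf.nn.bias_add(out, self.bias)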