Make a convolution kernel in Keras using the outputs of a layer - tensorflow

How can I create a properly sized kernel from the cross (outer) product of two outputs of my network, so that the resulting matrix (tensor) becomes the convolution kernel of a later layer? i.e.
def CrossMult(inputs):
    x0, x1 = inputs
    #x0 = tf.keras.backend.transpose(x0)
    x1 = tf.keras.backend.transpose(x1)
    # apply the layer operation to the tensors
    C = keras.layers.dot([x0, x1], axes=-1)
    return C

def Conv1d(inputs):
    x, kernel = inputs
    Recon = keras.backend.conv1d(x, kernel, strides=1, padding='same',
                                 dilation_rate=1)
    return Recon
input0 = Input(...
x0 = ConvLayer2(x0)
x1 = ConvLayer2(x1)
layer_conv_kernel = Lambda(Conv1d)
layer_cross_prod = Lambda(CrossMult)
#kernel = keras.layers.Multiply()([x0, x1])
Kernel = layer_cross_prod([x0, x1])
# The Kernel is the cross-product of the two output vectors, and this matrix
# will be the convolution kernel in the later layer.
Recon = layer_conv_kernel([input0, Kernel])
# This line raises an error!
# the shape of Kernel will be (None, M, N) (error)
Recon = keras.backend.conv1d(input1, Kernel, strides=1, padding='same', dilation_rate=1)
# This line raises another error!
Recon = Conv1D(1, width, strides=1, activation='relu', padding='same')(Recon)
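Both errors have the same root cause: keras.backend.conv1d expects a single static kernel of shape (width, in_channels, out_channels), while the Kernel produced above is a batch of kernels of shape (None, M, N). Below is a minimal sketch of one workaround (my assumption of the intent, not code from the post; the single-channel shapes and the wiring of x0, x1 and input0 are illustrative): build each sample's kernel with an outer product and apply it sample by sample with tf.map_fn.

import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda

def cross_mult(inputs):
    # outer product of two batched vectors:
    # x0: (batch, M), x1: (batch, N) -> kernels: (batch, M, N)
    x0, x1 = inputs
    return tf.einsum('bm,bn->bmn', x0, x1)

def per_sample_conv1d(inputs):
    # x: (batch, L, 1) signals; kernels: (batch, W, 1), one kernel per sample
    x, kernels = inputs
    def conv_one(args):
        xi, ki = args                      # xi: (L, 1), ki: (W, 1)
        ki = ki[:, :, tf.newaxis]          # -> (W, in_channels=1, out_channels=1)
        return tf.nn.conv1d(xi[tf.newaxis], ki, stride=1, padding='SAME')[0]
    # TF 2.3+; on older versions pass dtype=x.dtype instead
    return tf.map_fn(conv_one, (x, kernels), fn_output_signature=x.dtype)

# hypothetical wiring: x0 has shape (batch, W), x1 has shape (batch, 1)
kernel = Lambda(cross_mult)([x0, x1])      # (batch, W, 1)
recon = Lambda(per_sample_conv1d)([input0, kernel])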

Related

How to effectively run inference with artifact-removal CNN models?

Earlier, I trained the SNET model on the images available here for the purpose of artifact removal. The hyperparameters with which I trained the model are listed below.
SNET has eight convolution-based heads, each of which outputs a patch of a certain size. The input is the same patch with JPEG artifacts artificially added. The predicted patch and the ground-truth patch are compared with one another, and the MSE loss between them is backpropagated.
The hyperparameters that I used:
learning_rate = 0.0001, min_learning_rate = 0.000001 for the exponential scheduler
optimizer = Adam
loss metric = MSE (mean squared error)
patch_size = 48 x 48
training batch size = 16
Evaluation metrics:
PSNR: used to measure reconstruction quality between the original and reconstructed images.
SSIM: a common metric that quantifies the difference between the corresponding pixel values of the sample and reference images.
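Both metrics are available directly in TensorFlow. A minimal sketch (my illustration, not from the original post; predicted and ground_truth are hypothetical image batches scaled to [0, 1]):

import tensorflow as tf

# PSNR and SSIM between a predicted batch and a reference batch,
# assuming pixel values scaled to [0, 1]
psnr = tf.image.psnr(predicted, ground_truth, max_val=1.0)
ssim = tf.image.ssim(predicted, ground_truth, max_val=1.0)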
While training, I evaluated the model's performance at every step. There are eight 48 x 48 patches of the same image, since there are 8 heads in the model. The model was trained for 100 iterations; outputs of random patches from iterations 998 to 999 are given below:
The first image is the artifact-added input, the second is the prediction, and the third is the ground truth.
After training the model, I had to test it on bigger images with a larger context (that contain an object). So instead of resizing the images to 48 x 48, I divided them into 48 x 48 patches, and therefore only tested on images whose width and height are multiples of 48. The problem is that, even though the images have high PSNR and SSIM values, there is a visible seam from patch to patch, as shown below:
Is there a way to efficiently tackle this issue? Please suggest; I'm open to any kind of feedback.
Below is the code for the model that I used:
import os
import numpy as np
import tensorflow as tf
import cv2
from PIL import Image

def MSE(input, target):
    # note: despite the name, this computes the mean absolute error
    #return tf.reduce_sum(tf.reduce_mean(tf.abs(input - target), axis=0))
    return tf.reduce_mean(tf.abs(input - target))

initializer = tf.initializers.VarianceScaling()

def EncoderBlock(x, activation=tf.keras.layers.LeakyReLU(alpha=0.2), nf=256):
    x = tf.keras.layers.Conv2D(nf, 5, strides=1, padding='same', kernel_initializer=initializer, use_bias=True)(x)
    x = activation(x)
    x = tf.keras.layers.Conv2D(nf, 5, strides=1, padding='same', kernel_initializer=initializer, use_bias=True)(x)
    x = activation(x)
    return x

def DecoderBlock(x, activation=tf.keras.layers.LeakyReLU(alpha=0.2), nf=256):
    x = tf.keras.layers.Conv2D(nf, 5, strides=1, padding='same', kernel_initializer=initializer, use_bias=True)(x)
    x = activation(x)
    x = tf.keras.layers.Conv2D(3, 5, strides=1, padding='same', kernel_initializer=initializer, use_bias=True)(x)
    x = activation(x)
    return x

def ConvolutionalUnit(x, structure_type='classic', activation=tf.keras.layers.LeakyReLU(alpha=0.2), nf=256):
    residual = x
    if structure_type == "classic":
        x = tf.keras.layers.Conv2D(nf, 5, strides=1, padding='same', kernel_initializer=initializer, use_bias=True)(x)
        x = activation(x)
        x = tf.keras.layers.Add()([x, residual])
    elif structure_type == "advanced":
        x = tf.keras.layers.Conv2D(nf, 5, strides=1, padding='same', kernel_initializer=initializer, use_bias=True)(x)
        x = activation(x)
        x = tf.keras.layers.Conv2D(nf, 5, strides=1, padding='same', kernel_initializer=initializer, use_bias=True)(x)
        x = tf.keras.layers.Lambda(lambda x: x * 0.1)(x)
        x = tf.keras.layers.Add()([x, residual])
    return x

def S_Net(channels=3, num_metrics=8, structure_type='advanced', nf=256):
    inputs = tf.keras.layers.Input(shape=[None, None, channels])
    encoder = EncoderBlock(inputs, nf=nf)
    convolution_units = []
    decoders = []
    for i in range(num_metrics):
        # each head chains one more ConvolutionalUnit onto the previous one
        convolution_units.append(ConvolutionalUnit(
            convolution_units[-1] if len(convolution_units) > 0 else ConvolutionalUnit(encoder, nf=nf),
            structure_type=structure_type, nf=nf))
        decoders.append(DecoderBlock(convolution_units[-1], nf=nf))
    return tf.keras.Model(inputs=[inputs], outputs=decoders)
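One common way to hide the seams described above is to run the model on overlapping patches and average the overlapping predictions. A minimal sketch (my suggestion, not from the original post; it assumes the image height and width are multiples of the stride, and the choice of the last head's output is hypothetical):

import numpy as np

def predict_overlapping(model, image, patch=48, stride=24):
    # image: (H, W, C) float array; returns the blended reconstruction
    h, w, c = image.shape
    out = np.zeros((h, w, c), dtype=np.float32)
    weight = np.zeros((h, w, 1), dtype=np.float32)
    for y in range(0, h - patch + 1, stride):
        for x in range(0, w - patch + 1, stride):
            tile = image[y:y + patch, x:x + patch][np.newaxis]
            pred = model.predict(tile)[-1][0]  # last of the 8 heads
            out[y:y + patch, x:x + patch] += pred
            weight[y:y + patch, x:x + patch] += 1.0
    # average wherever patches overlapped
    return out / np.maximum(weight, 1.0)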

Keras model, getting the same loss even after changing the weights

I am trying to implement meta-learning for the Omniglot data set, but something is not right.
Here is the code:
def get_siamese_model(input_shape):
    """
    Model architecture based on the one provided in: http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
    """
    # Define the tensors for the two input images
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Convolutional Neural Network
    model = Sequential()
    model.add(Conv2D(64, (10, 10), activation='relu', input_shape=input_shape,
                     kernel_initializer=initialize_weights, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (7, 7), activation='relu',
                     kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (4, 4), activation='relu', kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(256, (4, 4), activation='relu', kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(Flatten())
    model.add(Dense(4096, activation='sigmoid',
                    kernel_regularizer=l2(1e-3),
                    kernel_initializer=initialize_weights, bias_initializer=initialize_bias))

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # # Add a customized layer to compute the absolute difference between the encodings
    # L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    # L1_distance = L1_layer([encoded_l, encoded_r])
    # # Add a dense layer with a sigmoid unit to generate the similarity score
    # prediction = Dense(1, activation='sigmoid', bias_initializer=initialize_bias)(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input], outputs=[encoded_l, encoded_r])
    # return the model
    return siamese_net
def forward(model, x1, x2):
    return model.call([x1, x2])

model = get_siamese_model((105, 105, 1))
test_loss = tf.convert_to_tensor(0.0)
with tf.GradientTape() as test_tape:
    test_tape.watch(model.trainable_weights)
    test_tape.watch(test_loss)
    x, y = get_batch(32)
    x1 = tf.cast(tf.convert_to_tensor(x[0]), dtype=tf.float32)
    x2 = tf.cast(tf.convert_to_tensor(x[1]), dtype=tf.float32)
    y1 = tf.cast(tf.convert_to_tensor(y), dtype=tf.float32)
    train_loss = tf.convert_to_tensor(0.0)
    with tf.GradientTape() as train_tape:
        train_tape.watch(model.trainable_weights)
        train_tape.watch(train_loss)
        train_loss = contrastive_loss(forward(model, x1, x2), y1)
    gradients = train_tape.gradient(train_loss, model.trainable_weights)
    old_weights = model.get_weights()
    model.set_weights([w - 0.01 * g for w, g in zip(model.trainable_weights, gradients)])
    test_loss = contrastive_loss(forward(model, x1, x2), y1)
    model.set_weights(old_weights)
print(train_loss)
print(test_loss)
Results:
tf.Tensor(8.294627, shape=(), dtype=float32)
tf.Tensor(8.294627, shape=(), dtype=float32)
Why am I getting the same loss? As you can see, the weights are changed, but the output is the same. Changing the weights should result in a different output, which should result in a different loss. Maybe forward changes the weights again?
I assume you are using a cross-entropy loss function. The loss you are seeing (8.2...) is essentially the maximum possible loss, which means there is an overflow in the loss calculation. This commonly happens, for example, if your predictions are outside the range 0-1, or if you are predicting exactly 0.
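As an illustration (my sketch, not from the original answer), this is the kind of guard that prevents the overflow: clip the predictions away from exactly 0 and 1 before taking logarithms.

import tensorflow as tf

def safe_binary_crossentropy(y_true, y_pred):
    # keep predictions strictly inside (0, 1) so log() cannot overflow
    eps = tf.keras.backend.epsilon()  # ~1e-7
    y_pred = tf.clip_by_value(y_pred, eps, 1.0 - eps)
    return -tf.reduce_mean(y_true * tf.math.log(y_pred)
                           + (1.0 - y_true) * tf.math.log(1.0 - y_pred))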

Converting keras functional model to keras class in tensorflow 2

I am trying to convert a Keras functional model into a class derived from tensorflow.keras.models.Model, and I'm facing 2 issues.
1. I need to multiply 2 layers using tensorflow.keras.layers.multiply, but it returns a ValueError: A merge layer should be called on a list of inputs.
2. If I remove this layer, thus working with a classical CNN, it returns a tensorflow.python.eager.core._SymbolicException: Inputs to eager execution function cannot be Keras symbolic tensors, but found [<tf.Tensor 'patch:0' shape=(None, 64, 64, 3) dtype=float32>].
I would appreciate some guidance to convert my code. I'm using Python 3.7, TensorFlow 2.0rc2 and Keras 2.3.0. The class I have defined is the following:
class TestCNN(Model):
    """
    conv1 > conv2 > fc1 > fc2 > alpha * fc2 > Sigmoid > output
    """
    def __init__(self, input_dimension, n_category, **kwargs):
        """
        Instantiator
        :param input_dimension: tuple of int, theoretically (patch_size x patch_size x channels)
        :param n_category: int, the number of categories to classify
        :param weight_decay: float, weight decay parameter for all the kernel regularizers
        :return: the Keras model
        """
        super(TestCNN, self).__init__(name='testcnn', **kwargs)
        self.input_dimension = input_dimension
        self.n_category = n_category
        self.conv1 = Conv2D(36, activation='relu', name='conv1/relu')
        self.conv1_maxpooling = MaxPooling2D((2, 2), name='conv1/maxpooling')
        self.conv2 = Conv2D(48, activation='relu', name='conv2/relu')
        self.conv2_maxpooling = MaxPooling2D((2, 2), name='conv2/maxpooling')
        self.flatten1 = Flatten(name='flatten1')
        self.fc1 = Dense(512, activation='relu', name='fc1/relu')
        self.fc2 = Dense(512, activation='relu', name='fc2/relu')
        self.alpha = TestLayer(layer_dim=128, name='alpha')
        self.output1 = TestSigmoid(output_dimension=n_category, name='output_layer')

    @tensorflow.function
    def call(self, x):
        x = self.conv1(x)
        x = self.conv1_maxpooling(x)
        x = self.conv2(x)
        x = self.conv2_maxpooling(x)
        x = self.flatten1(x)
        x = self.fc1(x)
        x = self.fc2(x)
        alpha_times_fc2 = multiply([alpha_output, fc2_output], name='alpha_times_fc2')
        return self.output1(alpha_times_fc2)

    def build(self, **kwargs):
        inputs = Input(shape=self.input_dimension, dtype='float32', name='patch')
        outputs = self.call(inputs)
        super(TestCNN, self).__init__(name="TestCNN", inputs=inputs, outputs=outputs, **kwargs)
Then, in my main loop, I'm creating the instance as follows:
testcnn = TestCNN(input_dimension=input_dimension, n_category=training_set.category_count)
optimizer = tensorflow.keras.optimizers.Adam(
    lr=parameter['training']['adam']['learning_rate'],
    beta_1=parameter['training']['adam']['beta1'],
    beta_2=parameter['training']['adam']['beta2'])
metrics_list = [tensorflow.keras.metrics.TruePositives]
loss_function = tensorflow.keras.losses.categorical_crossentropy
loss_metrics = tensorflow.keras.metrics.Mean()
testcnn.build()
testcnn.summary()
This code raises the tensorflow.python.eager.core._SymbolicException. If I comment out some lines and return the result of the fc2 layer directly, I get the ValueError instead.
I have commented out the build() function in my model and call it in my main script as follows:
testcnn.build(input_dimension)
testcnn.compile(optimizer=adam_optimizer, loss=loss_function, metrics=metrics_list)
testcnn.summary()
input_dimension is a tuple formatted as follows:
input_dimension = (batch_size, image_size, image_size, channels)
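For reference, a minimal sketch of how call() could be wired so that the merge layer receives a list of defined tensors (my assumption of the intended dataflow, not code from the original post: alpha is computed from the fc2 output and is assumed to be broadcastable against it):

from tensorflow.keras.layers import Multiply

def call(self, x):
    x = self.conv1(x)
    x = self.conv1_maxpooling(x)
    x = self.conv2(x)
    x = self.conv2_maxpooling(x)
    x = self.flatten1(x)
    x = self.fc1(x)
    fc2_output = self.fc2(x)
    alpha_output = self.alpha(fc2_output)  # hypothetical source of alpha
    # merge layers must be called on a list of tensors
    alpha_times_fc2 = Multiply(name='alpha_times_fc2')([alpha_output, fc2_output])
    return self.output1(alpha_times_fc2)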

Why doesn't the decoder connect to the expected layer in a Keras model?

I want to construct a variational autoencoder in Keras (2.2.4, with the TensorFlow backend); here is my code:
dims = [1000, 256, 64, 32]
x_inputs = Input(shape=(dims[0],), name='inputs')
h = x_inputs
# internal layers in encoder
for i in range(n_stacks-1):
    h = Dense(dims[i + 1], activation='relu', kernel_initializer='glorot_uniform', name='encoder_%d' % i)(h)
# hidden layer
z_mean = Dense(dims[-1], kernel_initializer='glorot_uniform', name='z_mean')(h)
z_log_var = Dense(dims[-1], kernel_initializer='glorot_uniform', name='z_log_var')(h)
z = Lambda(sampling, output_shape=(dims[-1],), name='z')([z_mean, z_log_var])
encoder = Model(inputs=x_inputs, outputs=z, name='encoder')
encoder_z_mean = Model(inputs=x_inputs, outputs=z_mean, name='encoder_z_mean')

# internal layers in decoder
latent_inputs = Input(shape=(dims[-1],), name='latent_inputs')
h = latent_inputs
for i in range(n_stacks-1, 0, -1):
    h = Dense(dims[i], activation='relu', kernel_initializer='glorot_uniform', name='decoder_%d' % i)(h)
# output
outputs = Dense(dims[0], activation='relu', kernel_initializer='glorot_uniform', name='mean')
decoder = Model(inputs=latent_inputs, outputs=outputs, name='decoder')

ae_output = decoder(encoder_z_mean(x_inputs))
ae = Model(inputs=x_inputs, outputs=ae_output, name='ae')
ae.summary()
vae_output = decoder(encoder(x_inputs))
vae = Model(inputs=x_inputs, outputs=vae_output, name='vae')
vae.summary()
The problem is that I can print the summaries of the "ae" and "vae" models, but when I train the ae model, it says:
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'latent_inputs' with dtype float and shape [?,32]
In the "ae" model, "decoder" is supposed to connect to the output of the "encoder_z_mean" layer. But when I print the summary of the "ae" model, "decoder" is actually connected to "encoder_z_mean[1][0]". Should it be "encoder_z_mean[0][0]"?
A few corrections:
x_inputs is already the input of the encoders; don't call them again with encoder_z_mean(x_inputs) or with encoder(x_inputs).
Besides creating a second node (the 1 that you are worried about, which is not a problem), this may be the source of the error, because it's not an extra input but the same input.
A healthy usage of this would require creating a new Input(...) tensor to be called.
The last Dense layer is not being called on a tensor. You probably want (h) there.
Do it this way:
# output - called on h, the last layer
outputs = Dense(dims[0], activation='relu', kernel_initializer='glorot_uniform', name='mean')(h)

# unchanged
decoder = Model(inputs=latent_inputs, outputs=outputs, name='decoder')

# adjusted inputs
ae_output = decoder(encoder_z_mean.output)
ae = Model(encoder_z_mean.input, ae_output, name='ae')
ae.summary()

vae_output = decoder(encoder.output)
vae = Model(encoder.input, vae_output, name='vae')
vae.summary()
It's possible that the [1][0] still occurs with the decoder, but this is not a problem at all. It means that the decoder itself has its own input node (number 0), and that you created an extra inbound node (number 1) when you called it on the output of another model. This is harmless: node 1 will be used while node 0 is ignored.
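If you want to inspect this, Keras exposes the tensor attached to each node (a small illustrative sketch):

# node 0: the decoder's own Input, created with the model
# node 1: the inbound node created when decoder was called on another tensor
print(decoder.get_input_at(0))  # the latent_inputs placeholder
print(decoder.get_input_at(1))  # the tensor coming from encoder_z_mean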

TypeError: Input tensors to a Model must be Keras tensors. Found: Tensor("Placeholder_3:0", dtype=float32) (missing Keras metadata)

My input variables:
IMG_SIZE_PX = 50
SLICE_COUNT = 20
n_classes = 2
x = tf.placeholder('float')
y = tf.placeholder('float')
keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)
My 3D convolution function:
def conv3d(x, W):
    return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')
My 3D max-pooling function:
def maxpool3d(x):
    return tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
                            padding='SAME')
This is my network:
def convolutional_neural_network(x):
    # my network weights
    weights = {'W_conv1': tf.Variable(tf.random_normal([3, 3, 3, 1, 32])),
               'W_conv2': tf.Variable(tf.random_normal([3, 3, 3, 32, 64])),
               # here 54080 is the input tensor value
               'W_fc': tf.Variable(tf.random_normal([54080, 1024])),
               'out': tf.Variable(tf.random_normal([1024, n_classes]))}
    # my network biases
    biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              'b_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([n_classes]))}
    # here is my input x
    x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1])
    # my 2 hidden layers (convolution + maxpooling)
    conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool3d(conv1)
    conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool3d(conv2)
    # my fully connected layer
    fc = tf.reshape(conv2, [-1, 54080])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
    fc = tf.nn.dropout(fc, keep_rate)
    # my output layer
    output = tf.matmul(fc, weights['out']) + biases['out']
    return output
My input numpy arrays:
much_data = np.load('D:/muchdata-50-50-20.npy')
train_data = much_data[-10:]
validation_data = much_data[-2:]
Finally, training my network:
def train_neural_network(x):
    outl = convolutional_neural_network(x)  # don't know; this is my output layer
    model = Model(input=x, output=outl)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])

train_neural_network(x)  # train the net
My error is this: the Keras metadata is missing.
Any help would be appreciated.
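The placeholder x is a raw TensorFlow tensor, so Keras cannot attach its metadata to it. A minimal sketch of one common fix (my assumption, not from the original post): rebuild the same architecture from Keras layers, starting from a Keras Input.

from tensorflow.keras.layers import Input, Conv3D, MaxPooling3D, Flatten, Dense, Dropout
from tensorflow.keras.models import Model

inputs = Input(shape=(IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1))
h = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)
h = MaxPooling3D((2, 2, 2), padding='same')(h)
h = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(h)
h = MaxPooling3D((2, 2, 2), padding='same')(h)
h = Flatten()(h)
h = Dense(1024, activation='relu')(h)
h = Dropout(1 - keep_rate)(h)           # dropout rate = 1 - keep probability
outl = Dense(n_classes)(h)

model = Model(inputs=inputs, outputs=outl)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy'])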