Create my own convolutional network without using Keras - tensorflow

I've just started with AI and implementing my own CONV networks.
I have this conv network:
model = tf.keras.models.Sequential([
    # Note the input shape is the desired size of the image 150x150 with 3 bytes color
    tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # Only 1 output neuron. It will contain a value from 0-1, where 0 is one class ('cats') and 1 is the other ('dogs')
    tf.keras.layers.Dense(1, activation='sigmoid')
])
Is there any way to implement the Conv2D layers without using Keras?
I want to understand how they work by implementing them myself.
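As a rough sketch of what a Conv2D layer computes under the hood, the forward pass of a single convolution can be written with plain NumPy and cross-checked against the low-level tf.nn.conv2d op (no tf.keras.layers involved). The function name conv2d_forward and the shapes below are illustrative, not part of the original post.

import numpy as np
import tensorflow as tf

def conv2d_forward(image, kernels, bias):
    """Naive 'valid' cross-correlation, which is what a Conv2D layer computes.

    image:   (H, W, C_in)
    kernels: (kH, kW, C_in, C_out)
    bias:    (C_out,)
    returns: (H - kH + 1, W - kW + 1, C_out)
    """
    h, w, _ = image.shape
    kh, kw, _, c_out = kernels.shape
    out = np.zeros((h - kh + 1, w - kw + 1, c_out))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            patch = image[i:i + kh, j:j + kw, :]  # receptive field of output pixel (i, j)
            # dot product of the patch with every filter, plus the bias
            out[i, j, :] = np.tensordot(patch, kernels, axes=([0, 1, 2], [0, 1, 2])) + bias
    return np.maximum(out, 0.0)  # ReLU, as in the layers above

# Cross-check against the low-level TensorFlow op (still no Keras layers)
img = np.random.rand(32, 32, 3).astype(np.float32)
k = np.random.rand(3, 3, 3, 16).astype(np.float32)
b = np.zeros(16, dtype=np.float32)

manual = conv2d_forward(img, k, b)
tf_out = tf.nn.relu(tf.nn.conv2d(img[None], k, strides=1, padding='VALID') + b)[0]
print(np.allclose(manual, tf_out.numpy(), atol=1e-4))  # should print True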

Related

Tensorflow NIH Chest X-ray CNN validation accuracy not improving even with regularization

I’ve been working on a CNN that takes in a 224x224 grayscale xray image and outputs either 0 or 1 based on whether it detects an abnormality.
This is the dataset I am using. I split the dataset into two with 106496 images for training and the remaining 5624 for validation. Since they’re both from the same dataset, they should both come from the same distribution.
I tried training the model I described above using the pretrained InceptionV3 and VGG19 architectures without success. I then tried making my own model similar to the VGG19 architecture.
I simplified the model as much as possible so that the training accuracy was above 90% and added various regularizers such as dropout and l2. I also tried different hyperparameters and image augmentation but the validation accuracy wouldn’t exceed 70% after 5-10 epochs. The validation loss doesn't seem to drop at all either.
Here are my accuracy vs epoch and loss vs epoch curves (pink is train, green is validation):
And here is my code:
import datetime

import pandas as pd
import tensorflow as tf
from tensorflow.keras import callbacks, layers, metrics, optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

NTRAIN = 106496  # 106496 training images, the remaining 5624 are used for validation

def create_model(settings):
    """
    Create a basic model
    """
    # create model
    model = tf.keras.models.Sequential()
    model.add(layers.Input((224, 224, 1)))
    # block 1
    model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block1_conv'))
    model.add(layers.MaxPool2D((2, 2), strides=(2, 2), name='block1_pool'))
    # block 2
    model.add(layers.Conv2D(96, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block2_conv'))
    model.add(layers.MaxPool2D((2, 2), strides=(2, 2), name='block2_pool'))
    # block 3
    model.add(layers.Conv2D(192, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block3_conv1'))
    model.add(layers.Conv2D(192, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block3_conv2'))
    model.add(layers.MaxPool2D((2, 2), strides=(2, 2), name='block3_pool'))
    # block 4
    model.add(layers.Conv2D(384, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block4_conv1'))
    model.add(layers.Conv2D(384, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block4_conv2'))
    model.add(layers.Conv2D(384, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block4_conv3'))
    model.add(layers.MaxPool2D((2, 2), strides=(2, 2), name='block4_pool'))
    # block 5
    model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block5_conv1'))
    model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block5_conv2'))
    model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', use_bias=True, name='block5_conv3'))
    model.add(layers.MaxPool2D((2, 2), strides=(2, 2), name='block5_pool'))
    # fully connected
    model.add(layers.GlobalAveragePooling2D(name='fc_pool'))
    model.add(layers.Dropout(0.3, name='fc_dropout'))
    model.add(layers.Dense(1, activation='sigmoid', name='fc_output'))
    # compile model
    model.compile(
        optimizers.SGD(
            learning_rate=settings["lr_init"],
            momentum=settings["momentum"],
        ),
        loss='binary_crossentropy',
        metrics=[
            'accuracy',
            metrics.Precision(),
            metrics.Recall(),
            metrics.AUC()
        ]
    )
    model.summary()
    return model
def configure_callbacks(settings):
    """
    Create a list of callback objects
    """
    # tensorboard
    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    # learning rate reduction on plateau
    lrreduce_callback = callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=settings["lr_factor"],
        patience=settings["lr_patience"],
        min_lr=settings["lr_min"],
        verbose=1,
    )
    # save model
    checkpoint_callback = callbacks.ModelCheckpoint(
        filepath="saves/" + settings["modelname"] + "/cp-{epoch:03d}",
        monitor='val_accuracy',
        mode='max',
        save_weights_only=True,
        save_best_only=True,
        verbose=1,
    )
    return [tensorboard_callback, lrreduce_callback, checkpoint_callback]
def get_data(settings):
    """
    Create a generator that will be used for training
    """
    df = pd.read_csv("dataset/y_train_binary.csv")
    columns = [
        "Abnormal"
    ]
    datagen = ImageDataGenerator(
        rescale=1./255.,
        rotation_range=5,
        brightness_range=(0.9, 1.1),
        zoom_range=(1, 1.1),
    )
    # 94.983% for training (106496 = 64*6656)
    traindata = datagen.flow_from_dataframe(
        dataframe=df[:NTRAIN],
        directory="dataset/images",
        x_col="Image Index",
        y_col=columns,
        color_mode='grayscale',
        batch_size=settings["batchsize"],
        class_mode="raw",
        target_size=(224, 224),
        shuffle=True,
    )
    # 5.017% for testing (5624)
    testdata = datagen.flow_from_dataframe(
        dataframe=df[NTRAIN:],
        directory="dataset/images",
        x_col="Image Index",
        y_col=columns,
        color_mode='grayscale',
        batch_size=settings["batchsize"],
        class_mode="raw",
        target_size=(224, 224),
        shuffle=True,
    )
    return (traindata, testdata)
def newtrain(settings):
    """
    Create a new model "(modelname)" and train for (epoch) epochs
    """
    model = create_model(settings)
    callbacks = configure_callbacks(settings)
    traindata, testdata = get_data(settings)
    # train
    model.fit(
        x=traindata,
        epochs=settings["epoch"],
        validation_data=testdata,
        callbacks=callbacks,
        verbose=1,
    )
    model.save_weights(f"saves/{settings['modelname']}/cp-{settings['epoch']:03d}")
I'm running out of ideas, and it takes half a day to train 50 epochs, so I would appreciate it if anyone knows how I can solve this issue. Thanks.
If you do some EDA on the NIH Chest X-rays you may also see that there is a significant class imbalance issue among the 14 classes. From your model definition, I assume that you put the normal images on one side and the abnormal images (the other 13 cases) on the other. First of all, if this is true, I would say it's better to classify all cases - all of them matter in clinical practice.
Shift to 14-class classification.
You're using a model of your own design, but you should first start with a pre-trained model. It's better, and you can then gradually integrate your own ideas.
Use a pretrained model, e.g. DenseNet, EfficientNet, NFNet, etc. (a rough sketch of this appears after the snippet below).
In your data generator, you use shuffle=True for the test set, which is wrong; it should be False:
testdata = datagen.flow_from_dataframe(
    ....
    target_size=(224,224),
    shuffle=False,
)
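A minimal sketch of the pretrained-backbone suggestion above, assuming the X-rays are loaded as 3-channel images (color_mode='rgb' in the generator) and fed as raw [0, 255] pixels so the DenseNet preprocessing can be reused; the backbone choice and hyperparameters are placeholders, not from the original answer.

import tensorflow as tf

# assumes inputs are 3-channel, unscaled [0, 255] pixels (drop rescale=1./255 from the generator)
backbone = tf.keras.applications.DenseNet121(
    include_top=False, weights='imagenet',
    input_shape=(224, 224, 3), pooling='avg')
backbone.trainable = False  # freeze first; optionally unfreeze the top blocks later

inputs = tf.keras.Input(shape=(224, 224, 3))
x = tf.keras.applications.densenet.preprocess_input(inputs)
x = backbone(x, training=False)
x = tf.keras.layers.Dropout(0.3)(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)

model = tf.keras.Model(inputs, outputs)
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss='binary_crossentropy',
              metrics=['accuracy'])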
For better control of your input pipeline, IMO, you should write your own custom data generator and experiment with more advanced augmentation to prevent overfitting; a rough sketch of such a generator follows.
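A minimal sketch, assuming the same dataframe layout as the question (an "Image Index" filename column and an "Abnormal" label column, with images under dataset/images); the class name XraySequence is made up for the example.

import math
import os

import numpy as np
import tensorflow as tf

class XraySequence(tf.keras.utils.Sequence):
    def __init__(self, df, image_dir, batch_size=64, target_size=(224, 224), shuffle=True):
        self.df = df.reset_index(drop=True)
        self.image_dir = image_dir
        self.batch_size = batch_size
        self.target_size = target_size
        self.shuffle = shuffle
        self.indices = np.arange(len(self.df))

    def __len__(self):
        return math.ceil(len(self.df) / self.batch_size)

    def __getitem__(self, idx):
        batch = self.indices[idx * self.batch_size:(idx + 1) * self.batch_size]
        images, labels = [], []
        for i in batch:
            row = self.df.iloc[i]
            img = tf.keras.preprocessing.image.load_img(
                os.path.join(self.image_dir, row["Image Index"]),
                color_mode="grayscale", target_size=self.target_size)
            images.append(tf.keras.preprocessing.image.img_to_array(img) / 255.0)
            labels.append(row["Abnormal"])
        return np.stack(images), np.array(labels, dtype=np.float32)

    def on_epoch_end(self):
        # reshuffle training data between epochs; keep validation order fixed
        if self.shuffle:
            np.random.shuffle(self.indices)

# Usage sketch:
# df = pd.read_csv("dataset/y_train_binary.csv")
# train_seq = XraySequence(df[:NTRAIN], "dataset/images", shuffle=True)
# val_seq = XraySequence(df[NTRAIN:], "dataset/images", shuffle=False)
# model.fit(train_seq, validation_data=val_seq, epochs=...)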

How to transfer weights from baseline model to federated model?

def create_keras_model():
    model = Sequential([
        Conv2D(16, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(32, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Flatten(),
        Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)),
        Dropout(0.5),
        Dense(1, activation='sigmoid')
    ])
    model.load_weights('/content/drive/My Drive/localmodel/weights')
    return model
Tried something like this in Colab, but I get errno 21 (Is a directory).
Then I tried another method as shown below,
tff_model = create_keras_model()  # this version of the function doesn't load weights, it just returns a Sequential model
tff.learning.assign_weights_to_keras_model(tff_model, model_with_weights)
Just like assign_weights_to_keras_model() transfers weights from the TFF model to the Keras model, I want to transfer weights from the Keras model to the TFF model. How can this be done?
Here model_with_weights must be a TFF value representing the weights of a model, for example:
def model_fn():
    keras_model = create_keras_model()
    return tff.learning.from_keras_model(keras_model)

fed_avg = tff.learning.build_federated_averaging_process(model_fn, ...)
state = fed_avg.initialize()
state = fed_avg.next(state, ...)
...
tff.learning.assign_weights_to_keras_model(keras_model, state.model)
I just found out how this can be done.
The idea is to use:
tff.learning.state_with_new_model_weights(state, trainable_weights_numpy, non_trainable_weights_numpy)
Documentation here
where the trainable weights are taken from the baseline model and converted to numpy format:
trainable_weights = []
for weights in baseline_model.trainable_weights:
    trainable_weights.append(weights.numpy())
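Putting it together, a sketch of the full transfer might look like the following, assuming baseline_model is the trained Keras model and fed_avg / state come from the federated averaging snippets above (the weights are passed positionally, as in the call shown earlier).

# Sketch only: push the baseline Keras model's weights into the server state
trainable_weights = [w.numpy() for w in baseline_model.trainable_weights]
non_trainable_weights = [w.numpy() for w in baseline_model.non_trainable_weights]

state = tff.learning.state_with_new_model_weights(
    state, trainable_weights, non_trainable_weights)

# training then continues from the baseline weights
# state = fed_avg.next(state, federated_train_data)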
This is particularly useful when the server has part of the data and the client has similar data. Maybe this can be used for transfer learning.

model.fit_generator() returns an Invalid Argument error

Below is the code I am using for training some gestures. The directory structure for the training data is as follows:
'E:\build\set_1\training\palm\seq_01', 'E:\build\set_1\training\palm\seq_02' and so on.
The error I am getting is on the last lines. I have tried both of the lines shown, but each gives an Invalid Argument error. I am running this code in a Jupyter notebook.
import tensorflow as tf
from tensorflow import keras
from keras_preprocessing.image import ImageDataGenerator

path = 'E:\build\set_1\training'
training_datagen = ImageDataGenerator(rescale=1./255)
TRAINING_DIR = 'E:/build/set_1/training/'
train_generator = training_datagen.flow_from_directory(
    TRAINING_DIR,
    target_size=(150,150),
    class_mode='categorical',
    batch_size=64
)
VALIDATION_DIR = "E:/build/set_1/test/"
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = training_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(150,150),
    class_mode='categorical',
    batch_size=64
)
model = tf.keras.models.Sequential([
    # Note the input shape is the desired size of the image 150x150 with 3 bytes color
    # This is the first convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # The second convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The third convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The fourth convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
              metrics=['accuracy'])

history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // train_generator.batch_size,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // validation_generator.batch_size
)
history = model.fit(train_generator, epochs=25, validation_data=validation_generator, verbose=1)

How to add custom layers inside VGG16 when doing transfer learning?

I am trying to use transfer learning with VGG16. My main idea is to train the first few layers of VGG16, add my own layer, afterwards add the rest of the VGG16 layers, and add my own output layer at the end. To do this I follow this sequence: (1) load and freeze the layers, (2) add my layers, (3) load the rest of the layers (except the output layer) [THIS IS WHERE I ENCOUNTER THE FOLLOWING ERROR] and freeze them, (4) add the output layer. Is my approach OK? If not, where am I going wrong? Here's the error:
ValueError: Input 0 is incompatible with layer block3_conv1: expected axis -1 of input shape to have value 128 but got shape (None, 64, 56, 64)
The full code is here for better understanding:
vgg16_model = load_model('Fetched_VGG.h5')
vgg16_model.summary()

model = Sequential()
# add vgg layers (inputLayer, block1, block2)
for layer in vgg16_model.layers[0:6]:
    model.add(layer)

# Freezing the layers (oppose weights to be updated)
for layer in model.layers:
    layer.trainable = False

# add custom layers
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block66_conv1_m'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block66_conv2_m'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block66_conv3_m'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block66_pool_m'))

# add vgg layers (block 3 to last layer, except the output dense layer)
for layer in vgg16_model.layers[7:-1]:
    model.add(layer)

# Freezing the layers (oppose weights to be updated)
for layer in model.layers:
    layer.trainable = False

# add output layer
model.add(Dense(2, activation='softmax', name='predictions'))
model.summary()
As VGG16 layer 7 (block3_conv1) is expecting 128 input channels, you'll need to match this with your final Conv2D:
model.add( Conv2D(128, (3, 3), activation='relu', padding='same', name='block66_conv3_m') )
If the dimensions match you should be able to build your model, but it's not clear what you're trying to achieve. Your approach of adding layers to the middle of the VGG16 model means that all the downstream layers will need to be retrained; see the sketch below.
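For illustration, a sketch of the custom block with that fix applied (this is an assumption of how the suggestion would look in the asker's code, not code from the original answer):

# the last custom conv must output 128 channels, because the next VGG16 layer
# (block3_conv1) expects 128 input channels
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block66_conv1_m'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block66_conv2_m'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block66_conv3_m'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block66_pool_m'))

# block 3 onwards from the original VGG16 can then be appended as before
for layer in vgg16_model.layers[7:-1]:
    model.add(layer)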

Pretrained Tensorflow model RGB -> RGBY channel extension

I am working on a protein analysis project. We receive images* of proteins taken with 4 filters (red, green, blue and yellow). Each of these RGBY channels contains unique data, as different cellular structures are visible with different filters.
The idea is to use a pre-trained network e.g. VGG19 and extend the number of channels from default 3 to 4. Something like this:
[Picture: VGG model with RGB input extended to RGBY - https://i.stack.imgur.com/TZKka.png]
The Y channel should be a copy of one of the existing pretrained channels. Then it is possible to make use of the pretrained weights.
Does anyone have an idea of how such extension of a pretrained network can be achieved?
* Author of the collage: Allunia from Kaggle, "Protein Atlas - Exploration and Baseline" kernel.
Use the layer.get_weights() and layer.set_weights() functions of the Keras API.
Create a template structure for a 4-channel VGG (set input shape=(width, height, 4)), then load the weights from the 3-channel RGB model into the 4-channel one as RGBB.
Below is the code that does this. In the case of the sequential VGG, the only layer that needs to be modified is the first convolution layer; the structure of the subsequent layers is independent of the number of channels.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from keras.applications.vgg19 import VGG19
from keras.models import Model

vgg19 = VGG19(weights='imagenet')
vgg19.summary()  # To check which layers will be omitted in 'pretrained' model

# Load part of the VGG without the top layers into 'pretrained' model
pretrained = Model(inputs=vgg19.input, outputs=vgg19.get_layer('block5_pool').output)
pretrained.summary()

#%% Prepare model template with 4 input channels
config = pretrained.get_config()  # run config['layers'][i] for reference
                                  # to restore the layer-by-layer structure

from keras.layers import Input, Conv2D, MaxPooling2D
from keras import optimizers

# For training from scratch change kernel_initializer to e.g. 'VarianceScaling'
inputs = Input(shape=(224, 224, 4), name='input_17')
# block 1
x = Conv2D(64, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block1_conv1')(inputs)
x = Conv2D(64, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block1_conv2')(x)
x = MaxPooling2D(pool_size=(2, 2), name='block1_pool')(x)
# block 2
x = Conv2D(128, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block2_conv1')(x)
x = Conv2D(128, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block2_conv2')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2,2), name='block2_pool')(x)
# block 3
x = Conv2D(256, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block3_conv1')(x)
x = Conv2D(256, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block3_conv2')(x)
x = Conv2D(256, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block3_conv3')(x)
x = Conv2D(256, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block3_conv4')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2,2), name='block3_pool')(x)
# block 4
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block4_conv1')(x)
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block4_conv2')(x)
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block4_conv3')(x)
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block4_conv4')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2,2), name='block4_pool')(x)
# block 5
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block5_conv1')(x)
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block5_conv2')(x)
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block5_conv3')(x)
x = Conv2D(512, (3,3), padding='same', activation='relu', kernel_initializer='zeros', name='block5_conv4')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2,2), name='block5_pool')(x)

vgg_template = Model(inputs=inputs, outputs=x)
vgg_template.compile(optimizer=optimizers.RMSprop(lr=2e-4),
                     loss='categorical_crossentropy',
                     metrics=['acc'])
#%% Rewrite the weight loading/modification function
import numpy as np

layers_to_modify = ['block1_conv1']  # Turns out the only layer that changes shape
                                     # due to the 4th channel is the first
                                     # convolution layer.

for layer in pretrained.layers:    # pretrained Model and template have the same
                                   # layers, so it doesn't matter which to iterate over.
    if layer.get_weights() != []:  # Skip input, pooling and other no-weights layers
        target_layer = vgg_template.get_layer(name=layer.name)
        if layer.name in layers_to_modify:
            kernels = layer.get_weights()[0]
            biases = layer.get_weights()[1]
            kernels_extra_channel = np.concatenate((kernels,
                                                    kernels[:, :, -1:, :]),
                                                   axis=-2)  # For channels_last
            target_layer.set_weights([kernels_extra_channel, biases])
        else:
            target_layer.set_weights(layer.get_weights())

#%% Save 4 channel model populated with weights for further use
vgg_template.save('vgg19_modified_clear.hdf5')
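As a quick sanity check (not part of the original answer), only block1_conv1's kernel should have changed shape, from (3, 3, 3, 64) to (3, 3, 4, 64), with the extra input channel duplicating the last pretrained channel:

# illustrative check of the copied weights
w_rgb = pretrained.get_layer('block1_conv1').get_weights()[0]
w_rgby = vgg_template.get_layer('block1_conv1').get_weights()[0]
print(w_rgb.shape, w_rgby.shape)                            # (3, 3, 3, 64) (3, 3, 4, 64)
print(np.allclose(w_rgby[:, :, :3, :], w_rgb))              # True: original kernels preserved
print(np.allclose(w_rgby[:, :, 3, :], w_rgb[:, :, 2, :]))   # True: 4th channel copied from the 3rd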
Beyond the RGBY case, the following snippet works more generally by copying or trimming the layer's weight and/or bias dimensions as needed. Please refer to the numpy documentation for what numpy.resize does: in the case of the original question it fills the extra Y-channel slots by repeating existing kernel values (or, more generally, adapts the weights to any higher dimensionality).
import numpy as np
import tensorflow as tf
...
model = ...  # your RGBY model is here
pretrained_model = tf.keras.models.load_model(...)  # pretrained RGB model

# the following assumes that the layers match between the two models and
# only the shapes of weights and/or biases are different
for pretrained_layer, layer in zip(pretrained_model.layers, model.layers):
    pretrained = pretrained_layer.get_weights()
    target = layer.get_weights()
    if len(pretrained) == 0:  # skip input, pooling and other no-weights layers
        continue
    try:
        # set the pretrained weights as is whenever possible
        layer.set_weights(pretrained)
    except:
        # numpy.resize to the rescue whenever there is a shape mismatch
        for idx, (l1, l2) in enumerate(zip(pretrained, target)):
            target[idx] = np.resize(l1, l2.shape)
        layer.set_weights(target)