What is wrong with my neural network's prediction code? All predictions return the same class for every image - tensorflow

Here is my training code:
def train():
    # START
    img_input = layers.Input(shape=(150, 150, 3))
    x = layers.Conv2D(16, 3, activation='relu')(img_input)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(512, activation='relu')(x)
    output = layers.Dense(1, activation='sigmoid')(x)
    model = Model(img_input, output)
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['acc'])
    # END

    # All images will be rescaled by 1./255
    train_datagen = ImageDataGenerator(rescale=1./255)
    val_datagen = ImageDataGenerator(rescale=1./255)
    bs = 20

    # Flow training images in batches of 20 using train_datagen generator
    train_generator = train_datagen.flow_from_directory(
        train_dir,  # This is the source directory for training images
        target_size=(150, 150),  # All images will be resized to 150x150
        batch_size=bs,
        # Since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')

    # Flow validation images in batches of 20 using val_datagen generator
    validation_generator = val_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=bs,
        class_mode='binary')

    history = model.fit(
        train_generator,
        steps_per_epoch=train_steps,
        epochs=4,
        validation_data=validation_generator,
        validation_steps=val_steps,
        verbose=1)
    model.save_weights("trained_weights.h5")
Here is my prediction code:
def evaluate(imgpath):
    if not os.path.isfile(imgpath):
        print("No such file: {}".format(imgpath))
        sys.exit(-1)

    # START
    img_input = layers.Input(shape=(150, 150, 3))
    x = layers.Conv2D(16, 3, activation='relu')(img_input)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(512, activation='relu')(x)
    output = layers.Dense(1, activation='sigmoid')(x)
    model = Model(img_input, output)
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['acc'])
    # END

    model.load_weights("trained_weights.h5")

    img = image.load_img(path=imgpath, grayscale=False, target_size=(150, 150), color_mode='rgb')
    img_arr = image.img_to_array(img)
    test_img = np.expand_dims(img_arr, axis=0)
    y_prob = model.predict(test_img)
    classname = y_prob.argmax(axis=-1)
    print("Class: ", classname)
    return classname
I have a feeling that the error is somewhere in the last 5-6 lines of the evaluate function, where I load the image. The problem is that whenever I run the evaluate function on any image, my output is [0], even though the training went well, as seen in the training log below.
[training log screenshot]
Am I making some silly mistake somewhere?

Since you have a single neuron as the top layer, model.predict returns a single value per image, and argmax over a length-1 array always returns 0. What you need to do instead is set a threshold on the prediction, for example:
if y_prob >= .5:
    klass = 1
else:
    klass = 0
Also, as pointed out by Dr. Snoopy, you should rescale your image by 1./255, exactly as the training generator does.
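Putting both fixes together, the tail of evaluate might look like this (a sketch; the 0.5 threshold is an assumption you can tune):

    img = image.load_img(path=imgpath, target_size=(150, 150), color_mode='rgb')
    img_arr = image.img_to_array(img) / 255.0   # rescale exactly as in training
    test_img = np.expand_dims(img_arr, axis=0)
    y_prob = model.predict(test_img)            # shape (1, 1): one sigmoid output
    klass = int(y_prob[0][0] >= 0.5)            # threshold instead of argmax
    print("Class:", klass)
    return klass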

Related

Keras RMSE MAE finding

I have a problem with my code and would be very happy if you can help.
The goal is to obtain the mean absolute error and the root mean squared error for different epochs and batch sizes. I'm very new to deep learning, so I tried to do it like this; however, I am very confused.
How can I fix or rewrite this code? Thank you so much.
# Reading the file
df = pd.read_csv('data.csv')
df = df[df.columns.difference(['Unnamed: 0'])]
input_data = df.iloc[:, :100].values
label_MOS = df['MOS'].values
train_X, val_X, train_y, val_y = train_test_split(input_data,
                                                  label_MOS, test_size=0.25, random_state=14)
x_train = train_X
y_train = train_y
x_test = val_X
y_test = val_y

def create_model():
    model = Sequential()
    model.add(Dense(32, input_dim=100, kernel_initializer='normal', activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    adam = Adam(learning_rate=0.1)
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])
    return model

# Create the model
model = KerasClassifier(build_fn=create_model, verbose=0)
# Define the grid search parameters
batch_size = [20]
epochs = [500, 1000]
# Make a dictionary of the grid search parameters
param_grid = dict(batch_size=batch_size, epochs=epochs)
# Build and fit the GridSearchCV
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=KFold(), verbose=0)
grid_result = grid.fit(x_train, y_train)
NNpredictions = model.predict(x_test)
MAE = mean_absolute_error(val_y, NNpredictions)
RMSE = mean_squared_error(val_y, NNpredictions, squared=False)
# Summarize the results
print(' MAE {}, RMSE {}'.format(MAE.best_score_, RMSE.best_params_))
mae = MAE.cv_results_['mae']
rmse = RMSE.cv_results_['rmse']
# params = grid_result.cv_results_['params']
for mean, stdev in zip(mae, rmse):
    print("mae %f rmse (%f) " % (mean, stdev))
I would do something like this:
import collections

batch_size = [20]
epochs = [500, 1000]
result_list = list()
for batch_value in batch_size:
    for epoch_value in epochs:
        model = create_model()
        model.fit(x=x_train, y=y_train, epochs=epoch_value, batch_size=batch_value)
        metrics = model.evaluate(x=x_test, y=y_test)
        ord_dic = collections.OrderedDict()
        ord_dic['batch_size'] = batch_value
        ord_dic['epochs'] = epoch_value
        ord_dic['metrics'] = metrics
        result_list.append(ord_dic)
print(result_list)
I have put the results in a list of ordered dictionaries, but you can easily change that part, for example by tabulating them as sketched below.
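A minimal sketch (assumes the result_list built above and that pandas is available, as in your imports):

import pandas as pd

results_df = pd.DataFrame(result_list)  # one row per (batch_size, epochs) combination
print(results_df)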
I updated your code
from keras import backend as K

def create_model(losses='mse'):
    model = Sequential()
    model.add(Dense(32, input_dim=100, kernel_initializer='normal', activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    adam = Adam(learning_rate=0.1)
    model.compile(optimizer=adam, loss=losses, metrics=['accuracy'])
    return model

def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

batch_size = [20]
epochs = [500, 1000]
losses = ['mse', root_mean_squared_error]
neural_network = KerasClassifier(build_fn=create_model, verbose=1)
param_grid = dict(losses=losses, epochs=epochs, batch_size=batch_size)
grid = GridSearchCV(estimator=neural_network, param_grid=param_grid)
grid_result = grid.fit(X_train, y_train)
print(grid_result.best_params_)
The create_model function needs to have a losses parameter; that is where the grid passes each candidate loss.
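To then report the MAE and RMSE the question asks for, one option is to score the best estimator on the held-out split (a sketch; assumes grid_result from above and scikit-learn's metrics):

from sklearn.metrics import mean_absolute_error, mean_squared_error

predictions = grid_result.best_estimator_.predict(x_test)
mae = mean_absolute_error(y_test, predictions)
rmse = mean_squared_error(y_test, predictions, squared=False)
print('MAE {:.4f}, RMSE {:.4f}'.format(mae, rmse))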

Low Triplet loss accuracy CIFAR10

I started learning about triplet networks and decided to implement one using convolutional neural networks on the CIFAR-10 dataset for image classification, but I get very low accuracy: after training, the accuracy is around 0.32.
def pairwise_distance(feature, squared=False):
    """Computes the pairwise distance matrix with numerical stability.
    output[i, j] = || feature[i, :] - feature[j, :] ||_2
    Args:
      feature: 2-D Tensor of size [number of data, feature dimension].
      squared: Boolean, whether or not to square the pairwise distances.
    Returns:
      pairwise_distances: 2-D Tensor of size [number of data, number of data].
    """
    pairwise_distances_squared = math_ops.add(
        math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
        math_ops.reduce_sum(
            math_ops.square(array_ops.transpose(feature)),
            axis=[0],
            keepdims=True)) - 2.0 * math_ops.matmul(feature,
                                                    array_ops.transpose(feature))
    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
    # Get the mask where the zero distances are at.
    error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = math_ops.sqrt(
            pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
    # Undo conditionally adding 1e-16.
    pairwise_distances = math_ops.multiply(
        pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
    num_data = array_ops.shape(feature)[0]
    # Explicitly set diagonals to zero.
    mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
        array_ops.ones([num_data]))
    pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
    return pairwise_distances
def masked_maximum(data, mask, dim=1):
    """Computes the axis wise maximum over chosen elements.
    Args:
      data: 2-D float `Tensor` of size [n, m].
      mask: 2-D Boolean `Tensor` of size [n, m].
      dim: The dimension over which to compute the maximum.
    Returns:
      masked_maximums: N-D `Tensor`.
        The maximized dimension is of size 1 after the operation.
    """
    axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
    masked_maximums = math_ops.reduce_max(
        math_ops.multiply(data - axis_minimums, mask), dim,
        keepdims=True) + axis_minimums
    return masked_maximums

def masked_minimum(data, mask, dim=1):
    """Computes the axis wise minimum over chosen elements.
    Args:
      data: 2-D float `Tensor` of size [n, m].
      mask: 2-D Boolean `Tensor` of size [n, m].
      dim: The dimension over which to compute the minimum.
    Returns:
      masked_minimums: N-D `Tensor`.
        The minimized dimension is of size 1 after the operation.
    """
    axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
    masked_minimums = math_ops.reduce_min(
        math_ops.multiply(data - axis_maximums, mask), dim,
        keepdims=True) + axis_maximums
    return masked_minimums
def triplet_loss_adapted_from_tf(y_true, y_pred):
    del y_true
    margin = 1.
    labels = y_pred[:, :1]
    labels = tf.cast(labels, dtype='int32')
    embeddings = y_pred[:, 1:]

    ### Code from Tensorflow function [tf.contrib.losses.metric_learning.triplet_semihard_loss] starts here:
    # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
    # lshape=array_ops.shape(labels)
    # assert lshape.shape == 1
    # labels = array_ops.reshape(labels, [lshape[0], 1])

    # Build pairwise squared distance matrix.
    pdist_matrix = pairwise_distance(embeddings, squared=True)
    # Build pairwise binary adjacency matrix.
    adjacency = math_ops.equal(labels, array_ops.transpose(labels))
    # Invert so we can select negatives only.
    adjacency_not = math_ops.logical_not(adjacency)

    # global batch_size
    batch_size = array_ops.size(labels)  # was 'array_ops.size(labels)'

    # Compute the mask.
    pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
    mask = math_ops.logical_and(
        array_ops.tile(adjacency_not, [batch_size, 1]),
        math_ops.greater(
            pdist_matrix_tile, array_ops.reshape(
                array_ops.transpose(pdist_matrix), [-1, 1])))
    mask_final = array_ops.reshape(
        math_ops.greater(
            math_ops.reduce_sum(
                math_ops.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
            0.0), [batch_size, batch_size])
    mask_final = array_ops.transpose(mask_final)

    adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
    mask = math_ops.cast(mask, dtype=dtypes.float32)

    # negatives_outside: smallest D_an where D_an > D_ap.
    negatives_outside = array_ops.reshape(
        masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
    negatives_outside = array_ops.transpose(negatives_outside)

    # negatives_inside: largest D_an.
    negatives_inside = array_ops.tile(
        masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
    semi_hard_negatives = array_ops.where(
        mask_final, negatives_outside, negatives_inside)

    loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)

    mask_positives = math_ops.cast(
        adjacency, dtype=dtypes.float32) - array_ops.diag(
        array_ops.ones([batch_size]))

    # In lifted-struct, the authors multiply 0.5 for upper triangular;
    # in semihard, they take all positive pairs except the diagonal.
    num_positives = math_ops.reduce_sum(mask_positives)

    semi_hard_triplet_loss_distance = math_ops.truediv(
        math_ops.reduce_sum(
            math_ops.maximum(
                math_ops.multiply(loss_mat, mask_positives), 0.0)),
        num_positives,
        name='triplet_semihard_loss')
    ### Code from Tensorflow function semi-hard triplet loss ENDS here.
    return semi_hard_triplet_loss_distance
def create_base_network(image_input_shape, embedding_size):
    weight_decay = 1e-4
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay),
                     input_shape=image_input_shape))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(embedding_size, activation='softmax'))
    return model
if __name__ == "__main__":
    # In case this script is called from another file, let's make sure it doesn't start training the network...
    batch_size = 128
    epochs = 100
    train_flag = True  # either True or False
    embedding_size = 64
    no_of_components = 2  # for visualization -> PCA.fit_transform()
    step = 10

    # The data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255.
    x_test /= 255.
    input_image_shape = (32, 32, 3)
    x_val = x_test  # [:2000, :, :]
    y_val = y_test  # [:2000]

    # Network training...
    if train_flag == True:
        base_network = create_base_network(input_image_shape, embedding_size)
        input_images = Input(shape=input_image_shape, name='input_image')  # input layer for images
        input_labels = Input(shape=(1,), name='input_label')  # input layer for labels
        embeddings = base_network([input_images])  # output of network -> embeddings
        labels_plus_embeddings = concatenate([input_labels, embeddings])  # concatenating the labels + embeddings

        # Defining a model with inputs (images, labels) and outputs (labels_plus_embeddings)
        model = Model(inputs=[input_images, input_labels],
                      outputs=labels_plus_embeddings)
        # model.summary()
        # plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)

        # train session
        opt = Adam(lr=0.001)  # choose optimiser. RMS is good too!
        model.compile(loss=triplet_loss_adapted_from_tf,
                      optimizer=opt)

        filepath = "semiH_trip_MNIST_v13_ep{epoch:02d}_BS%d.hdf5" % batch_size
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=False, period=25)
        callbacks_list = [checkpoint]

        # Uses 'dummy' embeddings + dummy gt labels. Will be removed as soon as loaded, to free memory
        dummy_gt_train = np.zeros((len(x_train), embedding_size + 1))
        dummy_gt_val = np.zeros((len(x_val), embedding_size + 1))

        x_train = np.reshape(x_train, (len(x_train), x_train.shape[1], x_train.shape[1], 3))
        x_val = np.reshape(x_val, (len(x_val), x_train.shape[1], x_train.shape[1], 3))

        H = model.fit(
            x=[x_train, y_train],
            y=dummy_gt_train,
            batch_size=batch_size,
            epochs=epochs,
            validation_data=([x_val, y_val], dummy_gt_val),
            callbacks=callbacks_list)
    else:
        model = load_model('semiH_trip_MNIST_v13_ep25_BS256.hdf5',
                           custom_objects={'triplet_loss_adapted_from_tf': triplet_loss_adapted_from_tf})

    # Test the network
    # creating an empty network
    testing_embeddings = create_base_network(input_image_shape,
                                             embedding_size=embedding_size)
    x_embeddings_before_train = testing_embeddings.predict(np.reshape(x_test, (len(x_test), 32, 32, 3)))

    # Grabbing the weights from the trained network
    for layer_target, layer_source in zip(testing_embeddings.layers, model.layers[2].layers):
        weights = layer_source.get_weights()
        layer_target.set_weights(weights)
        del weights

    # Visualizing the effect of embeddings -> using PCA!
    x_embeddings = testing_embeddings.predict(x_train)
    y_embeddings = testing_embeddings.predict(x_val)
    svc = SVC()
    svc.fit(x_embeddings, y_train)
    valid_prediction = svc.predict(y_embeddings)
    print(valid_prediction.shape)
    print("validation accuracy : ", accuracy_score(y_val, valid_prediction))
I would really appreciate it if you could check whether I am doing things right. Hope to hear from someone soon.
Try a simpler network like this (from here):
def create_base_network(image_input_shape, embedding_size):
    input_image = Input(shape=image_input_shape)  # input_5: InputLayer
    x = Flatten()(input_image)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(embedding_size)(x)  # dense_15: Dense
    base_network = Model(inputs=input_image, outputs=x)
    plot_model(base_network, to_file='base_netwoN.png',
               show_shapes=True, show_layer_names=True)
    return base_network
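Note that the final Dense layer here is linear, unlike the 'softmax' embedding layer in your create_base_network, which squashes the embeddings onto a probability simplex. A minimal sketch of how this base network slots into your existing training setup (all names reused from your script):

base_network = create_base_network(input_image_shape, embedding_size)
input_images = Input(shape=input_image_shape, name='input_image')
input_labels = Input(shape=(1,), name='input_label')
embeddings = base_network([input_images])
labels_plus_embeddings = concatenate([input_labels, embeddings])
model = Model(inputs=[input_images, input_labels], outputs=labels_plus_embeddings)
model.compile(loss=triplet_loss_adapted_from_tf, optimizer=Adam(lr=0.001))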

Keras: Why binary classification isn't as accurate as categorical classification

I am trying to create a model which can tell whether there are birds in an image or not.
I was using categorical classification to train the model to recognize Bird vs. Flower, and the results turned out to be very successful in terms of recognizing these 2 classes.
BUT, when I changed it to binary classification to detect the existence of birds in an image, the accuracy dropped dramatically.
The reason why I changed to binary classification is that if I provided a dog to my categorically trained model, it recognized the dog as a bird.
By the way, here is my dataset structure:
Training:
5000 images for birds and 2000 images for not-birds
Validating:
1000 images for birds and 500 images for not-birds
Someone said an imbalanced dataset will also cause problems. Is that true?
Could someone please point out where I went wrong in the following code?
def get_num_files(path):
    if not os.path.exists(path):
        return 0
    return sum([len(files) for r, d, files in os.walk(path)])

def get_num_subfolders(path):
    if not os.path.exists(path):
        return 0
    return sum([len(d) for r, d, files in os.walk(path)])

def create_img_generator():
    return ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True
    )

INIT_LT = 1e-3
Image_width, Image_height = 299, 299
Training_Epochs = 30
Batch_Size = 32
Number_FC_Neurons = 1024
Num_Classes = 2

train_dir = 'to my train folder'
validate_dir = 'to my validation folder'
num_train_samples = get_num_files(train_dir)
num_classes = get_num_subfolders(train_dir)
num_validate_samples = get_num_files(validate_dir)
num_epoch = Training_Epochs
batch_size = Batch_Size

train_image_gen = create_img_generator()
test_image_gen = create_img_generator()

train_generator = train_image_gen.flow_from_directory(
    train_dir,
    target_size=(Image_width, Image_height),
    batch_size=batch_size,
    seed=42
)
validation_generator = test_image_gen.flow_from_directory(
    validate_dir,
    target_size=(Image_width, Image_height),
    batch_size=batch_size,
    seed=42
)

Inceptionv3_model = InceptionV3(weights='imagenet', include_top=False)
print('Inception v3 model without last FC loaded')

x = Inceptionv3_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(Number_FC_Neurons, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)

# model = Model(inputs=Inceptionv3_model.input, outputs=predictions)
v3model = Model(inputs=Inceptionv3_model.input, outputs=predictions)

# Use new Sequential model to add v3model and add a batch normalization layer after
model = Sequential()
model.add(v3model)
model.add(BatchNormalization())  # added normalization
print(model.summary())

print('\nFine tuning existing model')
Layers_To_Freeze = 172
for layer in model.layers[:Layers_To_Freeze]:
    layer.trainable = False
for layer in model.layers[Layers_To_Freeze:]:
    layer.trainable = True

optizer = Adam(lr=INIT_LT, decay=INIT_LT / Training_Epochs)
# optizer = SGD(lr=0.0001, momentum=0.9)
model.compile(optimizer=optizer, loss='binary_crossentropy', metrics=['accuracy'])

cbk_early_stopping = EarlyStopping(monitor='val_acc', mode='max')

history_transfer_learning = model.fit_generator(
    train_generator,
    steps_per_epoch=num_train_samples,
    epochs=num_epoch,
    validation_data=validation_generator,
    validation_steps=num_validate_samples,
    class_weight='auto',
    callbacks=[cbk_early_stopping]
)

model.save('incepv3_transfer_mini_binary.h5', overwrite=True, include_optimizer=True)
Categorical
Use Num_Classes = 2
Use one-hot-encoded targets (example: Bird = [1, 0], Flower = [0, 1])
Use 'softmax' activation
Use 'categorical_crossentropy'
Binary
Use Num_Classes = 1
Use binary targets (example: is flower = 1 | not flower = 0)
Use 'sigmoid' activation
Use 'binary_crossentropy'
Details here: Using categorical_crossentropy for only two classes
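Applied to the code above, the binary variant would look roughly like this (a sketch; the generator's class_mode and the classification head are the pieces that change):

train_generator = train_image_gen.flow_from_directory(
    train_dir,
    target_size=(Image_width, Image_height),
    batch_size=batch_size,
    class_mode='binary',  # yields 0/1 labels instead of one-hot vectors
    seed=42
)

predictions = Dense(1, activation='sigmoid')(x)  # one unit instead of a 2-way softmax
model = Model(inputs=Inceptionv3_model.input, outputs=predictions)
model.compile(optimizer=optizer, loss='binary_crossentropy', metrics=['accuracy'])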

TensorFlow Keras Optimise prediction

I'm using TensorFlow and Keras to predict handwritten digits, training on the MNIST dataset.
The accuracy is about 98.8% after training, but in testing it sometimes confuses 4 with 9 and 7 with 3. I have already optimized the image input with OpenCV (noise removal, rescaling, thresholding, etc.).
What should I do next to improve prediction accuracy?
My plan is to add more samples and to resize the sample images from 28x28 to 56x56.
Will this affect accuracy?
This is my model for training (epochs=15, batch size=64):
def build_model():
    model = Sequential()
    # add Convolutional layers
    model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    # Densely connected layers
    model.add(Dense(128, activation='relu'))
    # output layer
    model.add(Dense(10, activation='softmax'))
    # compile with adam optimizer & categorical_crossentropy loss function
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
You can try to add regularization:
def conv2d_bn(x,
              units,
              kernel_size=(3, 3),
              activation='relu',
              dropout=.5):
    y = Dropout(dropout)(x)
    y = Conv2D(units, kernel_size=kernel_size, use_bias=False)(y)
    y = BatchNormalization()(y)
    y = Activation(activation)(y)
    return y
def build_model(..., dropout=.5):
    x = Input(shape=[...])
    y = conv2d_bn(x, 32)
    y = MaxPooling2D()(y)
    ...
    y = Dropout(dropout)(y)
    y = Dense(10, activation='softmax')(y)
    model = Model(x, y)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
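One caveat (my observation, not part of the original answer): this sketch compiles with 'sparse_categorical_crossentropy', which expects integer labels, whereas your model used 'categorical_crossentropy' with one-hot targets; pick whichever matches your label encoding. Hypothetical usage, assuming integer labels:

model = build_model(dropout=0.5)
model.fit(x_train, y_train, epochs=15, batch_size=64, validation_split=0.1)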
You can tweak the class weights to force the model to pay more attention to classes 3, 4, 7 and 9 during training (note the Keras argument is class_weight, singular):
model.fit(..., class_weight={0: 1, 1: 1, 2: 1, 3: 2, 4: 2, 5: 1, 6: 1, 7: 2, 8: 1, 9: 2})
If you have some time to burn, you can also try to grid- or random-search the model's hyperparameters. Something along the lines of:
def build(conv_layers, dense_layers, dense_units, activation, dropout):
    y = x = Input(shape=[...])
    kernels = 32
    kernel_size = (2, 2)
    for i in range(conv_layers):
        y = conv2d_bn(y, kernels, kernel_size, activation, dropout)
        if i % 2 == 0:  # or 3 or 4.
            y = MaxPooling2D()(y)
            kernels *= 2
            kernel_size = tuple(k + 1 for k in kernel_size)
    y = GlobalAveragePooling2D()(y)
    for i in range(dense_layers):
        y = Dropout(dropout)(y)
        y = Dense(dense_units)(y)
    y = Dense(10, activation='softmax')(y)
    model = Model(x, y)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

model = KerasClassifier(build,
                        epochs=epochs,
                        validation_split=validation_split,
                        verbose=0,
                        ...)
params = dict(conv_layers=[2, 3, 4],
              dense_layers=[0, 1],
              activation=['relu', 'selu'],
              dropout=[.2, .3, .5],
              callbacks=[callbacks.EarlyStopping(patience=10,
                                                 restore_best_weights=True)])
grid = GridSearchCV(model, params,
                    scoring='balanced_accuracy',
                    verbose=2,
                    n_jobs=1)
Now, combining hyperparameter searching with the NumpyArrayIterator is a little tricky, because the latter assumes we have all training samples (and targets) at hand before the training steps. It's still doable, though:
g = ImageDataGenerator(...)
cv = StratifiedKFold(n_splits=3)
results = dict(params=[], valid_score=[])

for params in ParameterGrid(params):
    fold_scores = []
    for t, v in cv.split(train_data, train_labels):
        train = g.flow(train_data[t], train_labels[t], subset='training')
        nn_valid = g.flow(train_data[t], train_labels[t], subset='validation')
        fold_valid = g.flow(train_data[v], train_labels[v], shuffle=False)
        nn = build_model(**params)
        nn.fit_generator(train, validation_data=nn_valid, ...)
        probabilities = nn.predict_generator(fold_valid, steps=...)
        p = np.argmax(probabilities, axis=1)
        fold_scores += [metrics.accuracy_score(train_labels[v], p)]
    results['params'] += [params]
    results['valid_score'] += [fold_scores]

best_ix = np.argmax(np.mean(results['valid_score'], axis=1))
best_params = results['params'][best_ix]
nn = build_model(**best_params)
nn.fit_generator(...)

Average weights in keras models

How do I average the weights of several Keras models when I train a few models with the same architecture but different initialisations?
My code currently looks something like this:
datagen = ImageDataGenerator(rotation_range=15,
                             width_shift_range=2.0/28,
                             height_shift_range=2.0/28)

epochs = 40
lr = (1.234e-3)
optimizer = Adam(lr=lr)
main_input = Input(shape=(28, 28, 1), name='main_input')

sub_models = []
for i in range(5):
    x = Conv2D(32, kernel_size=(3, 3), strides=1)(main_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(64, kernel_size=(3, 3), strides=1)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(64, kernel_size=(3, 3), strides=1)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)
    x = Dense(256)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.4)(x)
    x = Dense(10, activation='softmax')(x)
    sub_models.append(x)

x = keras.layers.average(sub_models)
main_output = keras.layers.average(sub_models)

model = Model(inputs=[main_input], outputs=[main_output])
model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
              optimizer=optimizer)

print(model.summary())
plot_model(model, to_file='model.png')

filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
tensorboard = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
callbacks = [checkpoint, tensorboard]

model.fit_generator(datagen.flow(X_train, y_train, batch_size=128),
                    steps_per_epoch=len(X_train) / 128,
                    epochs=epochs,
                    callbacks=callbacks,
                    verbose=1,
                    validation_data=(X_test, y_test))
So right now I average only the models' outputs at the last layer, but I want to average the weights of all layers after training each model separately.
Thanks!
So let's assume that models is a collection of your models. First, collect all the weights:
weights = [model.get_weights() for model in models]
Now, create a new set of averaged weights:
new_weights = list()
for weights_list_tuple in zip(*weights):
    new_weights.append(
        [numpy.array(weights_).mean(axis=0)
         for weights_ in zip(*weights_list_tuple)])
And what is left is to set these weights in a new model:
new_model.set_weights(new_weights)
Of course, averaging weights might be a bad idea, but if you do try it, this is the approach to follow.
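For context, a minimal end-to-end sketch (make_model is a hypothetical builder returning your architecture; any builder works as long as every model shares it):

models = [make_model() for _ in range(5)]
for m in models:
    m.fit(X_train, y_train, epochs=epochs, verbose=0)

weights = [m.get_weights() for m in models]
# ... average into new_weights as above ...
new_model = make_model()
new_model.set_weights(new_weights)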
I can't comment on the accepted answer, but to make it work on TensorFlow 2.0 with tf.keras I had to turn the inner list in the loop into a NumPy array:
new_weights = list()
for weights_list_tuple in zip(*weights):
    new_weights.append(
        np.array([np.array(w).mean(axis=0) for w in zip(*weights_list_tuple)])
    )
If different input models need to be weighted differently, np.array(w).mean(axis=0) needs to be replaced with np.average(np.array(w), axis=0, weights=relative_weights), where relative_weights is an array with a weight factor for each model.
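A sketch of that weighted variant (relative_weights is a hypothetical per-model importance array; np.average normalises the weights, so they need not sum to 1):

import numpy as np

relative_weights = [0.5, 0.3, 0.2]  # one factor per model, e.g. by validation score
weights = [m.get_weights() for m in models]
new_weights = [
    np.average(np.array(layer_tuple), axis=0, weights=relative_weights)
    for layer_tuple in zip(*weights)  # layer_tuple: one layer's arrays across models
]
new_model.set_weights(new_weights)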