TensorFlow Keras: optimising prediction accuracy

I'm using TensorFlow and Keras to predict handwritten digits, training on the MNIST dataset.
The accuracy is about 98.8% after training, but at test time the model sometimes confuses 4 with 9 and 7 with 3. I've already optimised the input images with OpenCV: noise removal, rescaling, thresholding, and so on.
What should I do next to improve prediction accuracy?
My plan is to add more samples and to resize the sample images from 28x28 to 56x56.
Will this affect accuracy?
This is my model for training (epochs=15, batch size=64):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

input_shape = (28, 28, 1)  # MNIST digits

def build_model():
    model = Sequential()
    # add convolutional layers
    model.add(Conv2D(filters=32, kernel_size=(3,3), activation='relu', padding='same', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Flatten())
    # densely connected layers
    model.add(Dense(128, activation='relu'))
    # output layer
    model.add(Dense(10, activation='softmax'))
    # compile with the Adam optimizer and categorical cross-entropy loss
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
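For reference, a minimal sketch (not part of the original post) of how this model would be fit on MNIST; categorical_crossentropy expects one-hot labels, hence to_categorical:

import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

model = build_model()
model.fit(x_train, y_train, epochs=15, batch_size=64,
          validation_data=(x_test, y_test))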

You can try to add regularization:

def conv2d_bn(x,
              units,
              kernel_size=(3, 3),
              activation='relu',
              dropout=.5):
    y = Dropout(dropout)(x)
    y = Conv2D(units, kernel_size=kernel_size, use_bias=False)(y)
    y = BatchNormalization()(y)
    y = Activation(activation)(y)
    return y

def build_model(..., dropout=.5):
    x = Input(shape=[...])
    y = conv2d_bn(x, 32)
    y = MaxPooling2D()(y)
    ...
    y = Dropout(dropout)(y)
    y = Dense(10, activation='softmax')(y)

    model = Model(x, y)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
You can tweak the class weights to force the model to pay more attention to classes 3, 4, 7 and 9 during training (note the keyword argument is class_weight):

model.fit(..., class_weight={0: 1, 1: 1, 2: 1, 3: 2, 4: 2, 5: 1, 6: 1, 7: 2, 8: 1, 9: 2})
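Before hand-tuning these weights, it's worth checking which digits are actually confused. A small sketch, assuming the x_test/y_test one-hot arrays from the training example above:

import numpy as np
from sklearn.metrics import confusion_matrix

# rows are true digits, columns are predicted digits; large off-diagonal
# entries show which pairs (e.g. 4/9, 7/3) deserve higher class weights
y_pred = np.argmax(model.predict(x_test), axis=1)
y_true = np.argmax(y_test, axis=1)
print(confusion_matrix(y_true, y_pred))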
If you have some time to burn, you can also try grid- or random-searching the model's hyperparameters. Something along the lines of:

def build(conv_layers, dense_layers, dense_units, activation, dropout):
    y = x = Input(shape=[...])
    kernels = 32
    kernel_size = (2, 2)
    for i in range(conv_layers):
        y = conv2d_bn(y, kernels, kernel_size, activation, dropout)
        if i % 2 == 0:  # or 3 or 4.
            y = MaxPooling2D()(y)
            kernels *= 2
            kernel_size = tuple(k + 1 for k in kernel_size)
    y = GlobalAveragePooling2D()(y)
    for i in range(dense_layers):
        y = Dropout(dropout)(y)
        y = Dense(dense_units, activation=activation)(y)
    y = Dense(10, activation='softmax')(y)

    model = Model(x, y)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

model = KerasClassifier(build,
                        epochs=epochs,
                        validation_split=validation_split,
                        verbose=0,
                        ...)
params = dict(conv_layers=[2, 3, 4],
              dense_layers=[0, 1],
              activation=['relu', 'selu'],
              dropout=[.2, .3, .5],
              callbacks=[callbacks.EarlyStopping(patience=10,
                                                 restore_best_weights=True)])
grid = GridSearchCV(model, params,
                    scoring='balanced_accuracy',
                    verbose=2,
                    n_jobs=1)
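Running the search is then a single fit call (a sketch; it assumes the flattened training arrays are in memory):

grid.fit(x_train, y_train)
print(grid.best_params_, grid.best_score_)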
Now, combining hyperparameter search with the NumpyArrayIterator is a little tricky, because the latter assumes we have all training samples (and targets) at hand before the training steps. It's still doable, though:
g = ImageDataGenerator(...)
cv = StratifiedKFold(n_splits=3)
results = dict(params=[], valid_score=[])

for params in ParameterGrid(params):
    fold_scores = []
    for t, v in cv.split(train_data, train_labels):
        train = g.flow(train_data[t], train_labels[t], subset='training')
        nn_valid = g.flow(train_data[t], train_labels[t], subset='validation')
        # shuffle=False keeps the prediction order aligned with train_labels[v]
        fold_valid = g.flow(train_data[v], train_labels[v], shuffle=False)

        nn = build_model(**params)
        nn.fit_generator(train, validation_data=nn_valid, ...)

        probabilities = nn.predict_generator(fold_valid, steps=...)
        p = np.argmax(probabilities, axis=1)
        fold_scores += [metrics.accuracy_score(train_labels[v], p)]
    results['params'] += [params]
    results['valid_score'] += [fold_scores]

best_ix = np.argmax(np.mean(results['valid_score'], axis=1))
best_params = results['params'][best_ix]

nn = build_model(**best_params)
nn.fit_generator(...)

Related

What is wrong with my neural network's prediction code? All predictions return the same class name for every image

Here is my training code:

def train():
    # START
    img_input = layers.Input(shape=(150, 150, 3))
    x = layers.Conv2D(16, 3, activation='relu')(img_input)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(512, activation='relu')(x)
    output = layers.Dense(1, activation='sigmoid')(x)

    model = Model(img_input, output)
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['acc'])
    # END

    # All images will be rescaled by 1./255
    train_datagen = ImageDataGenerator(rescale=1./255)
    val_datagen = ImageDataGenerator(rescale=1./255)

    bs = 20

    # Flow training images in batches of 20 using train_datagen generator
    train_generator = train_datagen.flow_from_directory(
        train_dir,               # source directory for training images
        target_size=(150, 150),  # all images will be resized to 150x150
        batch_size=bs,
        # Since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')

    # Flow validation images in batches of 20 using val_datagen generator
    validation_generator = val_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=bs,
        class_mode='binary')

    history = model.fit(
        train_generator,
        steps_per_epoch=train_steps,
        epochs=4,
        validation_data=validation_generator,
        validation_steps=val_steps,
        verbose=1)

    model.save_weights("trained_weights.h5")
Here is my prediction code:
def evaluate(imgpath):
    if not os.path.isfile(imgpath):
        print("No such file: {}".format(imgpath))
        sys.exit(-1)

    # START
    img_input = layers.Input(shape=(150, 150, 3))
    x = layers.Conv2D(16, 3, activation='relu')(img_input)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPooling2D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(512, activation='relu')(x)
    output = layers.Dense(1, activation='sigmoid')(x)

    model = Model(img_input, output)
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['acc'])
    # END

    model.load_weights("trained_weights.h5")

    img = image.load_img(path=imgpath, grayscale=False, target_size=(150, 150), color_mode='rgb')
    img_arr = image.img_to_array(img)
    test_img = np.expand_dims(img_arr, axis=0)
    y_prob = model.predict(test_img)
    classname = y_prob.argmax(axis=-1)
    print("Class: ", classname)
    return classname
I have a feeling that the error is somewhere in the last 5-6 lines of the evaluate function, where I am loading the image. The problem is that whenever I run the evaluate function for any image, my output is [0], even though the training itself went well.
Am I making some silly mistake somewhere?
Since you have a single neuron as the top layer, predictions come back as a single value, and argmax over a single value always returns 0. What you need to do instead is threshold the prediction, for example:

if y_prob >= 0.5:
    klass = 1
else:
    klass = 0

Also, as pointed out by Dr. Snoopy, you should rescale your image by 1/255, just as during training.
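Putting both fixes together, the end of evaluate() could look like this (a sketch; the variable names follow the question's code):

img = image.load_img(path=imgpath, target_size=(150, 150), color_mode='rgb')
img_arr = image.img_to_array(img) / 255.0   # match the training-time rescale
test_img = np.expand_dims(img_arr, axis=0)
y_prob = model.predict(test_img)[0][0]      # single sigmoid output in (0, 1)
klass = 1 if y_prob >= 0.5 else 0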

Keras model, getting the same loss even after changing the weights

I am trying to implement meta-learning for the Omniglot dataset, but something is not right.
Here is the code:
def get_siamese_model(input_shape):
    """
    Model architecture based on the one provided in: http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
    """
    # Define the tensors for the two input images
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Convolutional Neural Network
    model = Sequential()
    model.add(Conv2D(64, (10,10), activation='relu', input_shape=input_shape,
                     kernel_initializer=initialize_weights, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (7,7), activation='relu',
                     kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (4,4), activation='relu', kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(256, (4,4), activation='relu', kernel_initializer=initialize_weights,
                     bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
    model.add(Flatten())
    model.add(Dense(4096, activation='sigmoid',
                    kernel_regularizer=l2(1e-3),
                    kernel_initializer=initialize_weights, bias_initializer=initialize_bias))

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # # Add a customized layer to compute the absolute difference between the encodings
    # L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    # L1_distance = L1_layer([encoded_l, encoded_r])
    # # Add a dense layer with a sigmoid unit to generate the similarity score
    # prediction = Dense(1, activation='sigmoid', bias_initializer=initialize_bias)(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input], outputs=[encoded_l, encoded_r])

    # return the model
    return siamese_net
def forward(model, x1, x2):
    return model.call([x1, x2])

model = get_siamese_model((105, 105, 1))

test_loss = tf.convert_to_tensor(0.0)
with tf.GradientTape() as test_tape:
    test_tape.watch(model.trainable_weights)
    test_tape.watch(test_loss)

    x, y = get_batch(32)
    x1 = tf.cast(tf.convert_to_tensor(x[0]), dtype=tf.float32)
    x2 = tf.cast(tf.convert_to_tensor(x[1]), dtype=tf.float32)
    y1 = tf.cast(tf.convert_to_tensor(y), dtype=tf.float32)

    train_loss = tf.convert_to_tensor(0.0)
    with tf.GradientTape() as train_tape:
        train_tape.watch(model.trainable_weights)
        train_tape.watch(train_loss)
        train_loss = contrastive_loss(forward(model, x1, x2), y1)

    gradients = train_tape.gradient(train_loss, model.trainable_weights)
    old_weights = model.get_weights()
    model.set_weights([w - 0.01 * g for w, g in zip(model.trainable_weights, gradients)])
    test_loss = contrastive_loss(forward(model, x1, x2), y1)
    model.set_weights(old_weights)

print(train_loss)
print(test_loss)
Results:
tf.Tensor(8.294627, shape=(), dtype=float32)
tf.Tensor(8.294627, shape=(), dtype=float32)
Why am I getting the same loss? As you can see, the weights are changed, but the output is the same. Shouldn't changing the weights produce a different output, and hence a different loss? Maybe forward changes the weights again?
I assume you are using a cross-entropy loss function. The loss you are seeing (8.2...) is essentially the maximum possible loss, which means there's an overflow in the loss calculation. This commonly happens, for example, if your predictions fall outside the range 0-1, or if you are predicting exactly 0.
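To illustrate the saturation (a sketch, not from the original answer): frameworks clip predictions away from 0 and 1 before taking logs, which caps the loss at a large constant, so once the predictions saturate, different weights can still produce the identical capped loss.

import numpy as np

def binary_crossentropy(y_true, y_pred, eps=1e-7):
    # clip predictions as Keras does internally so the log terms stay finite
    y_pred = np.clip(y_pred, eps, 1 - eps)
    return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

# predicting exactly 0 for the true class pins the loss to the clipped maximum
print(binary_crossentropy(np.array([1.0]), np.array([0.0])))  # ~16.1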

How can I alternately train two models in keras with different optimizers?

I have two models that draw their input values from the same training dataset. I am trying to train the two models alternately, with two optimizers that have different learning rates: while training one model, I have to freeze the weights of the other model, and vice versa. However, the approach I am using takes too long to train and even raises an OOM error, whereas when I simply train the models together, no such problem occurs.
The code snippet and an image of a sample model are attached below; the actual models have numerous layers and high-dimensional input.
def convA(x):
    conv1 = keras.layers.Conv2D(64, (3,3), strides=(1, 1), padding='valid', activation='relu', name='conv21')(x)
    conv2 = keras.layers.Conv2D(16, (3,3), strides=(1, 1), padding='valid', activation='relu', name='conv22')(conv1)
    return conv2

def convB(x):
    conv1 = keras.layers.Conv2D(64, (3,3), strides=(1, 1), padding='valid', activation='relu', name='conv2a')(x)
    conv2 = keras.layers.Conv2D(16, (3,3), strides=(1, 1), padding='valid', activation='relu', name='conv2b')(conv1)
    return conv2

x = Input(shape=(11,11,32), name='input1')
convP = convA(x)
convQ = convB(x)
model1 = Model(x, convP)
model2 = Model(x, convQ)

multiply_layer = keras.layers.Multiply()([model1(x), model2(x)])
conv1_reshape = keras.layers.Reshape([7*7*16], name='fc_reshape')(multiply_layer)
fc = keras.layers.Dense(15, activation='softmax', name='fc1')(conv1_reshape)
model_main = Model(x, fc)

optim1 = keras.optimizers.SGD(0.0009, momentum=0.01, nesterov=True)
optim2 = keras.optimizers.SGD(0.00009, momentum=0.01, nesterov=True)

for epoch in range(250):
    for batch in range(100):
        x_batch = x_train[batch*16:(batch+1)*16, :, :, :]
        y_batch = y_train[batch*16:(batch+1)*16, :]

        model1.trainable = False
        model2.trainable = True
        model_main.compile(loss='categorical_crossentropy', optimizer=optim1, metrics=['accuracy'])
        model_main.train_on_batch(x_batch, y_batch)

        model1.trainable = True
        model2.trainable = False
        model_main.compile(loss='categorical_crossentropy', optimizer=optim2, metrics=['accuracy'])
        model_main.train_on_batch(x_batch, y_batch)
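One likely culprit is the compile() inside the inner loop: every call rebuilds the training function and accumulates graph state, which is slow and can eventually trigger the OOM. Since a Keras model's trainable flags are frozen into it at compile time, a common workaround (a sketch, not from the thread) is to compile each trainable configuration once, up front, and reuse both in the loop:

# view that only updates convB's weights
model1.trainable = False
model2.trainable = True
train_B = Model(x, fc)
train_B.compile(loss='categorical_crossentropy', optimizer=optim1, metrics=['accuracy'])

# view that only updates convA's weights
model1.trainable = True
model2.trainable = False
train_A = Model(x, fc)
train_A.compile(loss='categorical_crossentropy', optimizer=optim2, metrics=['accuracy'])

for epoch in range(250):
    for batch in range(100):
        x_batch = x_train[batch*16:(batch+1)*16]
        y_batch = y_train[batch*16:(batch+1)*16]
        train_B.train_on_batch(x_batch, y_batch)
        train_A.train_on_batch(x_batch, y_batch)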

Training output is not in the valid range when using a CNN with an LSTM

I use Keras with TensorFlow as the backend.
The goal is to use a geospatial time-series dataset to build a classifier. The target Y is labeled -1, 0, 1 or 2, where -1 indicates the measured data at that grid point, 0 means good-quality data, 1 middle quality and 2 the worst.
Right now I have two inputs: atmospheric surface variables, such as wind, wind speed and rain, as one input, and oceanic surface variables, such as sea surface temperature and sea surface salinity, as the second. The input datasets have dimensions (n_samples, n_timesteps, n_variables, n_xpoints: longitude, n_ypoints: latitude); the target dataset is 3D: (n_samples, n_xpoints: longitude, n_ypoints: latitude).
In addition, all input variables are normalized by their value range. For example, sea surface current velocity is normalized to (-1, 1) from (-2, 2) [m/s], and surface wind speed is normalized to (-1, 1) from (-20, 20) [m/s].
The model configuration is designed as described below.
def cnn():
    model = Sequential()
    model.add(Conv2D(64, (3,3), activation='relu',
                     data_format='channels_first', kernel_initializer='he_normal',
                     name='conv1'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3,3), activation='relu',
                     kernel_initializer='he_normal', data_format='channels_first',
                     name='conv2'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    return model

def cnn2lstm(Input_shape, premo, name):
    branch_in = Input(shape=Input_shape, dtype='float32')
    model = TimeDistributed(premo)(branch_in)
    model = LSTM(256, return_sequences=True, name=name+'_lstm1')(model)
    model = TimeDistributed(Dense(4096, activation='relu'))(model)
    model = TimeDistributed(Dropout(0.3))(model)
    model = LSTM(256, return_sequences=True, name=name+'_lstm2')(model)
    model = Dense(101, activation='sigmoid')(model)
    model = Dropout(0.3)(model)
    return branch_in, model
atm_in, atm = cnn2lstm(Train_atm.shape[1:], cnn(), 'atm')
ocn_in, ocn = cnn2lstm(Train_ocn.shape[1:], cnn(), 'ocn')

#--- two inputs into one output
x = keras.layers.concatenate([atm, ocn], axis=1)
x = LSTM(150, return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(200, return_sequences=True)(x)
x = Dropout(0.2)(x)
x = LSTM(500)(x)
x = Dense(1001, activation='relu')(x)
x = Dense(2001, activation='relu')(x)
x = Dense(2501, activation='tanh')(x)
x = Dense(2701, activation='relu')(x)
x = Dense(3355, activation='softmax')(x)
x = Reshape((61,55), input_shape=(3355,))(x)

model2 = Model(inputs=[atm_in, ocn_in, bio_in], outputs=x)
plot_model(model2, show_shapes=True, to_file='model_way4_2.png')
model2.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

filepath = 'ways4_model02_best.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
hist = model2.fit([Train_atm, Train_ocn, Train_bio], Train_Y,
                  epochs=150, batch_size=3, validation_split=0.1,
                  shuffle=True, callbacks=callbacks_list, verbose=0)

scores = model2.evaluate([Train_atm, Train_ocn, Train_bio], Train_Y)
print("MODEL 2 %s: %.2f%%" % (model2.metrics_names[1], scores[1]*100))
The evaluation score here is mostly 83% or higher, but the values from model2.predict are not in the range of my target labels. Instead, the model outputs values between 0 and 1, albeit with a pattern similar to the target dataset.
Could anyone point out the big issue in my DL algorithm?
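For what it's worth, a softmax layer always emits probabilities in (0, 1), so model2.predict can never produce the labels -1..2 directly; integer labels have to be recovered with an argmax over a class axis. A sketch, assuming a hypothetical output of shape (n_samples, 61, 55, 4) with one channel per quality class:

import numpy as np

# hypothetical post-processing: per-class probabilities -> quality labels;
# assumes predictions of shape (n_samples, 61, 55, 4), one channel per class
label_values = np.array([-1, 0, 1, 2])
probs = model2.predict([Train_atm, Train_ocn, Train_bio])
labels = label_values[np.argmax(probs, axis=-1)]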

Average weights in keras models

How can I average weights in Keras models when I train a few models with the same architecture but with different initialisations?
Right now my code looks something like this:
datagen = ImageDataGenerator(rotation_range=15,
                             width_shift_range=2.0/28,
                             height_shift_range=2.0/28)

epochs = 40
lr = 1.234e-3
optimizer = Adam(lr=lr)

main_input = Input(shape=(28,28,1), name='main_input')
sub_models = []

for i in range(5):
    x = Conv2D(32, kernel_size=(3,3), strides=1)(main_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=2)(x)

    x = Conv2D(64, kernel_size=(3,3), strides=1)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=2)(x)

    x = Conv2D(64, kernel_size=(3,3), strides=1)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Flatten()(x)

    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)

    x = Dense(256)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.4)(x)

    x = Dense(10, activation='softmax')(x)
    sub_models.append(x)

main_output = keras.layers.average(sub_models)
model = Model(inputs=[main_input], outputs=[main_output])

model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
              optimizer=optimizer)
print(model.summary())
plot_model(model, to_file='model.png')

filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
tensorboard = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
callbacks = [checkpoint, tensorboard]

model.fit_generator(datagen.flow(X_train, y_train, batch_size=128),
                    steps_per_epoch=len(X_train) / 128,
                    epochs=epochs,
                    callbacks=callbacks,
                    verbose=1,
                    validation_data=(X_test, y_test))
So right now I only average the models' outputs in the last layer, but I want to average the weights in all layers after training each model separately.
Thanks!
So let's assume that models is a collection of your models. First - collect all weights:
weights = [model.get_weights() for model in models]
Now create the new averaged weights:

new_weights = list()
for weights_list_tuple in zip(*weights):
    new_weights.append(
        [numpy.array(weights_).mean(axis=0)
         for weights_ in zip(*weights_list_tuple)])
What is left is to set these weights in a new model:

new_model.set_weights(new_weights)

Of course, averaging weights might be a bad idea, but in case you want to try it, this is the approach to follow.
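For completeness, a compact round trip (a sketch; it assumes all entries of models share one architecture, so clone_model can build a fresh copy to receive the averaged weights):

import numpy as np
from tensorflow import keras

weights = [m.get_weights() for m in models]
# each w is a tuple of corresponding arrays, one per model; stacking and
# averaging over axis 0 keeps every layer's weight shape intact
new_weights = [np.mean(w, axis=0) for w in zip(*weights)]

new_model = keras.models.clone_model(models[0])
new_model.set_weights(new_weights)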
I can't comment on the accepted answer, but to make it work on TensorFlow 2.0 with tf.keras I had to turn the list built in the loop into a NumPy array:

new_weights = list()
for weights_list_tuple in zip(*weights):
    new_weights.append(
        np.array([np.array(w).mean(axis=0) for w in zip(*weights_list_tuple)])
    )
If different input models need to be weighted differently, np.array(w).mean(axis=0) needs to be replaced with np.average(np.array(w),axis=0, weights=relative_weights) where relative_weights is an array with a weight factor for each model.
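For example (a sketch; the relative_weights values are hypothetical):

import numpy as np

relative_weights = [2, 1, 1]  # one factor per model; hypothetical values
new_weights = [np.average(np.array(w), axis=0, weights=relative_weights)
               for w in zip(*weights)]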