I'm getting a weird error when I try to create a network using an upsampling layer and manually set the interpolation keyword to 'bilinear'.
If I leave it out and go with the default of 'nearest' (nearest neighbour), it works fine.
Does anyone know what's up?
Code for the model; the error is thrown at layer 'up1':
def build_model(self):
    chnl4_input = Input(shape=(368, 256, 4))
    chnl3_input = Input(shape=(736, 512, 3))
    conv1 = Conv2D(26, self.kernel_size, activation='relu', padding='same')(chnl4_input)
    conv2 = Conv2D(26, self.kernel_size, strides=(2, 2), activation='relu', padding='same')(conv1)
    conv5 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv2)
    conv6 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv5)
    up1 = concatenate([UpSampling2D(size=(2, 2), interpolation='bilinear')(conv6), conv1], axis=-1)
    conv7 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(up1)
    conv8 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv7)
    conv9 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv8)
    conv11 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv9)
    conv12 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv11)
    up3 = concatenate([UpSampling2D(size=(2, 2), interpolation='bilinear')(conv12), chnl3_input], axis=-1)
    conv13 = Conv2D(67, self.kernel_size, activation='relu', padding='same')(up3)
    conv14 = Conv2D(67, self.kernel_size, activation='relu', padding='same')(conv13)
    conv15 = Conv2D(32, self.kernel_size, activation='relu', padding='same')(conv14)
    conv16 = Conv2D(3, self.kernel_size, activation='relu', padding='same')(conv15)
    out = conv16
    self.model = Model(inputs=[chnl4_input, chnl3_input], outputs=[out])
    self.model.compile(optimizer=self.optimizer_func, loss=self.loss_func)
    self.model.name = 'UNET'
    return self.model
Error: TypeError: ('Keyword argument not understood:', 'interpolation')
~/MastersWork/Fergal/Scripts/models.py in build_model(self)
29 conv6 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv5)
30
---> 31 up1 = concatenate([UpSampling2D(size=(2, 2), interpolation='bilinear')(conv6), conv1], axis=-1)
32 conv7 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(up1)
33
~/anaconda3/envs/rhys_tensorflow/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~/anaconda3/envs/rhys_tensorflow/lib/python3.6/site-packages/keras/layers/convolutional.py in __init__(self, size, data_format, **kwargs)
1804 @interfaces.legacy_upsampling2d_support
1805 def __init__(self, size=(2, 2), data_format=None, **kwargs):
-> 1806 super(UpSampling2D, self).__init__(**kwargs)
1807 self.data_format = conv_utils.normalize_data_format(data_format)
1808 self.size = conv_utils.normalize_tuple(size, 2, 'size')
~/anaconda3/envs/rhys_tensorflow/lib/python3.6/site-packages/keras/engine/topology.py in __init__(self, **kwargs)
291 for kwarg in kwargs:
292 if kwarg not in allowed_kwargs:
--> 293 raise TypeError('Keyword argument not understood:', kwarg)
294 name = kwargs.get('name')
295 if not name:
For reference, the Keras docs page for UpSampling2D:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/UpSampling2D
Here is a workaround for bilinear upsampling, using a Lambda layer and tf.image.resize_bilinear; it works fine on TF 1.12.0. (The interpolation keyword was only added to UpSampling2D in a later Keras release, which is why the older install in the traceback rejects it; upgrading Keras is the other option.)

import tensorflow as tf
from keras import backend as K
from keras.layers import Lambda

def bilinear_upsample(tensor, size):
    # resize to an explicit (height, width)
    y = tf.image.resize_bilinear(images=tensor, size=size)
    return y

# input_tensor and scale come from the surrounding model code
dims = K.int_shape(input_tensor)
y_scaled = Lambda(lambda x: bilinear_upsample(tensor=x, size=(dims[1] * scale, dims[2] * scale)))(input_tensor)
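To slot that into the model above, the 'up1' line can be written with the Lambda wrapper instead (a sketch reusing the bilinear_upsample helper defined above; the factor 2 matches the original size=(2, 2)):

dims = K.int_shape(conv6)
up = Lambda(lambda x: bilinear_upsample(tensor=x, size=(dims[1] * 2, dims[2] * 2)))(conv6)
up1 = concatenate([up, conv1], axis=-1)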
Related
I am building a reinforcement learning model and trying to use PReLU in my 2D conv model with TensorFlow. Here is the code for the actor model:
from tensorflow.keras.layers import Conv2D, Input, MaxPool1D, concatenate, Lambda, Dense, Flatten
import tensorflow as tf
# activation = tf.keras.layers.LeakyReLU(alpha=0.5)
activation = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
def ActorNetwork(input_shape_A, input_shape_B, n_actions):
    input_layer_A = Input(shape=input_shape_A[1:], name="input_layer_A")
    input_layer_B = Input(shape=input_shape_B[1:], name="input_layer_B")
    Rescale = Lambda(lambda x: tf.divide(tf.subtract(x, tf.reduce_max(x)), tf.subtract(tf.reduce_max(x), tf.reduce_min(x))))(input_layer_A)
    Conv1 = Conv2D(32, 3, activation=activation, padding='same', name="Conv1")(Rescale)
    Conv2 = Conv2D(32, 3, activation=activation, padding='same', name="Conv2")(Conv1)
    Conv_pool_1 = Conv2D(32, 2, strides=2, activation='relu', padding='same', name="Conv_pool_1")(Conv2)
    Batchnorm_1 = tf.keras.layers.BatchNormalization(name='Batchnorm_1')(Conv_pool_1)
    Conv3 = Conv2D(32, 3, activation=activation, padding='same', name="Conv3")(Batchnorm_1)
    Conv4 = Conv2D(32, 3, activation=activation, padding='same', name="Conv4")(Conv3)
    Conv_pool_2 = Conv2D(32, 2, strides=2, activation='relu', padding='same', name="Conv_pool_2")(Conv4)
    Batchnorm_2 = tf.keras.layers.BatchNormalization(name='Batchnorm_2')(Conv_pool_2)
    Conv5 = Conv2D(64, 3, activation=activation, padding='same', name="Conv5")(Batchnorm_2)
    Conv6 = Conv2D(64, 3, activation=activation, padding='same', name="Conv6")(Conv5)
    Conv_pool_3 = Conv2D(64, 2, strides=2, activation='relu', padding='same', name="Conv_pool_3")(Conv6)
    Batchnorm_3 = tf.keras.layers.BatchNormalization(name='Batchnorm_3')(Conv_pool_3)
    Conv7 = Conv2D(64, 3, activation=activation, padding='same', name="Conv7")(Batchnorm_3)
    Conv8 = Conv2D(64, 3, activation=activation, padding='same', name="Conv8")(Conv7)
    Conv_pool_4 = Conv2D(64, 2, strides=2, activation='relu', padding='same', name="Conv_pool_4")(Conv8)
    Batchnorm_4 = tf.keras.layers.BatchNormalization(name='Batchnorm_4')(Conv_pool_4)
    Conv9 = Conv2D(128, 3, activation=activation, padding='same', name="Conv9")(Batchnorm_4)
    Conv10 = Conv2D(128, 3, activation=activation, padding='same', name="Conv10")(Conv9)
    Conv_pool_5 = Conv2D(128, 2, strides=2, activation='relu', padding='same', name="Conv_pool_5")(Conv10)
    Batchnorm_5 = tf.keras.layers.BatchNormalization(name='Batchnorm_5')(Conv_pool_5)
    Conv11 = Conv2D(128, 3, activation=activation, padding='same', name="Conv11")(Batchnorm_5)
    Conv12 = Conv2D(128, 3, activation=activation, padding='same', name="Conv12")(Conv11)
    Conv_pool_6 = Conv2D(128, 2, strides=2, activation='relu', padding='same', name="Conv_pool_6")(Conv12)
    Batchnorm_6 = tf.keras.layers.BatchNormalization(name='Batchnorm_6')(Conv_pool_6)
    Conv_pool_7 = Conv2D(128, 1, strides=1, activation='relu', padding='same', name="Conv_pool_7")(Batchnorm_6)
    Conv_pool_8 = Conv2D(64, 1, strides=1, activation='relu', padding='same', name="Conv_pool_8")(Conv_pool_7)
    Conv_pool_9 = Conv2D(32, 1, strides=1, activation='relu', padding='same', name="Conv_pool_9")(Conv_pool_8)
    flatten = Flatten()(Conv_pool_9)
    Concat_2 = tf.keras.layers.concatenate([flatten, input_layer_B], axis=-1, name='Concat_2')
    fc1 = Dense(8194, activation='relu', name="fc1")(Concat_2)
    fc2 = Dense(4096, activation='relu', name="fc2")(fc1)
    fc3 = Dense(n_actions, activation='softmax', name="fc3")(fc2)
    return tf.keras.models.Model(inputs=[input_layer_A, input_layer_B], outputs=fc3, name="actor_model")

model = ActorNetwork((1, 1000, 4000, 1), (1, 2), 3)
model.compile()
model.summary()
print(model([tf.random.uniform((1, 1000, 4000, 1)), tf.random.uniform((1, 2))]))
tf.keras.utils.plot_model(model, show_shapes=True)
It works fine with LeakyReLU, but when I use PReLU it throws an error related to dimensions, and I don't understand why.
Error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-17-0a596da4bc68> in <module>
131
132
--> 133 model=ActorNetwork((1,1000,4000,1),(1,2),3)
134 model.compile()
135 model.summary()
2 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs, op_def)
2011 except errors.InvalidArgumentError as e:
2012 # Convert to ValueError for backwards compatibility.
-> 2013 raise ValueError(e.message)
2014
2015 return c_op
ValueError: Exception encountered when calling layer "p_re_lu_10" (type PReLU).
Dimensions must be equal, but are 1000 and 500 for '{{node Conv3/p_re_lu_10/mul}} = Mul[T=DT_FLOAT](Conv3/p_re_lu_10/Neg, Conv3/p_re_lu_10/Relu_1)' with input shapes: [1000,4000,32], [?,500,2000,32].
Call arguments received:
• inputs=tf.Tensor(shape=(None, 500, 2000, 32), dtype=float32)
What am I doing wrong here?
The PReLU activation maintains a learnable parameter alpha with the same shape as its input; you can read more in the documentation. Because you create a single PReLU instance and reuse it for every layer, its alpha is built for the shape of the first tensor it sees (1000 x 4000 x 32 here) and then fails on the downsampled 500 x 2000 x 32 tensor, which is exactly what the error reports. LeakyReLU has no learnable parameters, so sharing one instance happens to work.
You need to define a new PReLU layer each time you want to use that activation, i.e.:
Conv1 = Conv2D(32, 3, activation=None, padding='same', name="Conv1")(Rescale)
Conv1_p_relu = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))(Conv1)
Conv2 = Conv2D(32, 3, activation=None, padding='same', name="Conv2")(Conv1_p_relu)
Conv2_p_relu = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))(Conv2)
Conv_pool_1 = Conv2D(32, 2, strides=2, activation='relu', padding='same', name="Conv_pool_1")(Conv2_p_relu)
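To avoid repeating that pair for every block, a small helper can create a fresh PReLU per call (a sketch; conv_prelu is an illustrative name, not part of the original code):

def conv_prelu(x, filters, kernel_size, name):
    x = Conv2D(filters, kernel_size, activation=None, padding='same', name=name)(x)
    # a new PReLU instance per call, so each alpha is built for this tensor's shape
    return tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))(x)

Conv1 = conv_prelu(Rescale, 32, 3, name="Conv1")
Conv2 = conv_prelu(Conv1, 32, 3, name="Conv2")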
I'm trying to use an autoencoder to reconstruct images of size (128, 128, 3). I want to use an attention layer as the last layer of the encoder part, but I get the error below. How can I fix it?
Attention layer must be called on a list of inputs, namely [query, value] or [query, value, key]. Received: Tensor("Placeholder:0", shape=(None, 64, 64, 8), dtype=float32).
def encoder(input_img):
    # encoder; input is 128 x 128 x 3
    conv1 = Conv2D(24, (2, 2), activation='relu', padding='same')(input_img)  # 128 x 128 x 24
    conv2 = BatchNormalization()(conv1)
    conv3 = Conv2D(12, (2, 2), activation='relu', padding='same')(conv2)
    conv4 = BatchNormalization()(conv3)
    pool5 = MaxPooling2D(pool_size=(2, 2))(conv4)  # 64 x 64 x 12
    conv6 = Conv2D(8, (2, 2), activation='relu', padding='same')(pool5)  # 64 x 64 x 8
    conv6 = Attention(use_scale=True)(conv6)  # <-- error raised here
    conv7 = BatchNormalization()(conv6)
    conv8 = Conv2D(4, (2, 2), activation='relu', padding='same')(conv7)
    return conv8
def decoder(conv8):
    # decoder
    conv12 = Conv2D(4, (2, 2), activation='relu', padding='same')(conv8)
    conv14 = BatchNormalization()(conv12)
    conv15 = Conv2D(8, (2, 2), activation='relu', padding='same')(conv14)
    conv16 = BatchNormalization()(conv15)
    conv17 = Conv2D(12, (2, 2), activation='relu', padding='same')(conv16)  # 64 x 64 x 12
    conv18 = BatchNormalization()(conv17)
    conv19 = Conv2D(24, (2, 2), activation='relu', padding='same')(conv18)
    conv20 = BatchNormalization()(conv19)
    up21 = UpSampling2D((2, 2))(conv20)  # 128 x 128 x 24
    decoded = Conv2D(3, (2, 2), activation='tanh', padding='same')(up21)  # 128 x 128 x 3
    return decoded
opt = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False, reduction="auto", name="sparse_categorical_crossentropy")

autoencoder = Model(input_img, decoder(encoder(input_img)))
autoencoder.compile(loss='mae', optimizer='adam')
autoencoder.summary()
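The error message states the fix: Keras's Attention layer must be called on a list such as [query, value]. A minimal sketch of one way to do that in the encoder above (self-attention over the spatial positions; the Reshape sizes assume the 64 x 64 x 8 tensor from the error message):

from tensorflow.keras.layers import Attention, Reshape

conv6 = Conv2D(8, (2, 2), activation='relu', padding='same')(pool5)
seq = Reshape((64 * 64, 8))(conv6)           # flatten the spatial grid into a sequence
att = Attention(use_scale=True)([seq, seq])  # query = value gives self-attention
conv6 = Reshape((64, 64, 8))(att)            # back to a feature map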
The callback is saving checkpoint files, but not the SavedModel model.pb file. Additionally, when I load the model from the checkpoints, it does not restore the 'val_loss' value that I'm conditioning save_best_only on.
I tried calling model.save() only on the best iteration, but I had trouble getting that to work correctly, and it would be more convenient to use the ModelCheckpoint callback.
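Roughly what I attempted looks like this (a sketch; SaveBestSavedModel is an illustrative name, and it assumes 'val_loss' appears in the epoch logs):

class SaveBestSavedModel(tf.keras.callbacks.Callback):
    def __init__(self, export_dir):
        super().__init__()
        self.export_dir = export_dir
        self.best = float('inf')

    def on_epoch_end(self, epoch, logs=None):
        val_loss = (logs or {}).get('val_loss')
        if val_loss is not None and val_loss < self.best:
            self.best = val_loss
            # save_format='tf' writes a SavedModel directory containing model.pb
            self.model.save(self.export_dir, save_format='tf')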
Here is the relevant code:

LOSS = tf.keras.losses.MeanSquaredError()
# multi-output: 3 categories from 0 to 1

model = ImgToClassSimpleContinuous(img_height, img_width)
checkpoint_filename = "../chkpts/ImgToClassSimpleContinuous/checkpoint_dir"
model.load_weights(checkpoint_filename)

cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filename,
    verbose=1, mode='min', monitor="val_loss", save_best_only=True, save_weights_only=False)

model.compile(
    optimizer='adam',
    loss=[LOSS, LOSS, LOSS],
    metrics=['mse'])

model.fit(
    dataset_to_use,
    validation_data=dataset_validation_batched,
    # validation_steps=50,
    epochs=MAX_EPOCHS,
    batch_size=BATCH_SIZE,
    callbacks=[cp_callback]
)
class ImgToClassSimpleContinuous(Model):
    '''
    pair with loss = categorical_crossentropy
    '''
    in_types = [DataType.d]
    out_types = [DataType.tlc, DataType.tls, DataType.tll]

    def __init__(self, img_height, img_width, *args, **kwargs):
        super().__init__(ImgToClassSimple, *args, **kwargs)
        initializer = 'he_normal'
        input_shape = (img_height, img_width, 1)
        inputs = tf.keras.Input(shape=input_shape)
        flat_pix = layers.Flatten()(inputs)

        x = layers.Conv2D(8, 3, padding='same', kernel_initializer=initializer)(inputs)
        x = layers.PReLU()(x)
        x = layers.Conv2D(8, 3, padding='same', kernel_initializer=initializer)(x)
        x = layers.PReLU()(x)
        x = layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = layers.BatchNormalization()(x)

        x = layers.Conv2D(16, 3, padding='same', kernel_initializer=initializer)(x)
        x = layers.PReLU()(x)
        x = layers.Conv2D(16, 3, padding='same', kernel_initializer=initializer)(x)
        x = layers.PReLU()(x)
        x = layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = layers.BatchNormalization()(x)

        t = layers.Conv2D(32, 3, padding='same', kernel_initializer=initializer)(x)
        t = layers.PReLU()(t)
        t = layers.Conv2D(32, 3, padding='same', kernel_initializer=initializer)(t)
        t = layers.PReLU()(t)
        t = layers.MaxPooling2D(pool_size=(2, 2))(t)
        t = layers.BatchNormalization()(t)
        t = tf.keras.layers.GlobalAveragePooling2D()(t)
        t = layers.Flatten()(t)

        s = layers.Conv2D(32, 3, padding='same', kernel_initializer=initializer)(x)
        s = layers.PReLU()(s)
        s = layers.Conv2D(32, 3, padding='same', kernel_initializer=initializer)(s)
        s = layers.PReLU()(s)
        s = layers.MaxPooling2D(pool_size=(2, 2))(s)
        s = layers.BatchNormalization()(s)
        s = tf.keras.layers.GlobalAveragePooling2D()(s)
        s = layers.Flatten()(s)

        l = layers.Conv2D(32, 3, padding='same', kernel_initializer=initializer)(x)
        l = layers.PReLU()(l)
        l = layers.Conv2D(32, 3, padding='same', kernel_initializer=initializer)(l)
        l = layers.PReLU()(l)
        l = layers.MaxPooling2D(pool_size=(2, 2))(l)
        l = layers.BatchNormalization()(l)
        l = tf.keras.layers.GlobalAveragePooling2D()(l)
        l = layers.Flatten()(l)

        t = layers.Dense(1, activation='sigmoid')(t)
        s = layers.Dense(1, activation='sigmoid')(s)
        l = layers.Dense(1, activation='sigmoid')(l)

        # A Dense classifier with a single unit (binary classification)
        self.model = tf.keras.Model(inputs, [t, s, l])
        tf.keras.utils.plot_model(self.model, to_file="...", show_shapes=True)

    def call(self, x):
        return self.model(x)
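On the missing best value: ModelCheckpoint starts every run without a previous best, so one option is to re-evaluate after restoring the weights and seed the callback with that number. A sketch, assuming a TF version whose ModelCheckpoint accepts initial_value_threshold:

# after model.compile(), measure the restored model's total validation loss
best_val_loss = model.evaluate(dataset_validation_batched, verbose=0)[0]

cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filename, monitor="val_loss", mode="min",
    save_best_only=True, save_weights_only=False, verbose=1,
    initial_value_threshold=best_val_loss)  # only save when this value is beaten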
I am attempting to train a model in which the input exceeds the memory limits for a single GPU on the system (16 GB P100). The size of the input is (1,256,256,64,2). However, I have access to 4 identical GPUs on the system. I know I can distribute processes with tf.distribute but I am unsure how to do this with a batch size of 1. Is it possible to distribute a single sample over multiple GPUs so I don't receive OOM errors?
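For reference, a minimal sketch of the tf.distribute pattern mentioned above, using the build_unet defined in the edit below. MirroredStrategy is data-parallel: it replicates the model on each GPU and splits every batch across them, so with a batch size of 1 there is nothing to split; spreading one sample across devices would need model parallelism instead.

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
print("Replicas in sync:", strategy.num_replicas_in_sync)

with strategy.scope():
    # each GPU holds a full replica; the global batch is divided among them,
    # so this only helps once the batch size is at least the number of GPUs
    model = build_unet(image_shape=image_shape, batch_size=1)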
Edit:
Here is the code used to build the model.
def dice_loss(y_true, y_pred):
    numerator = 2 * tf.reduce_sum(y_true * y_pred, axis=(1, 2, 3))
    denominator = tf.reduce_sum(y_true + y_pred, axis=(1, 2, 3))
    return tf.reshape(1 - numerator / denominator, (-1, 1, 1))
class ResidualUnitEncode(keras.layers.Layer):
    def __init__(self, filters=1, strides=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        self.activation = keras.activations.get(activation)
        self.main_layers = [
            keras.layers.Conv3D(filters, (3, 3, 3), strides=strides,
                                padding="same", use_bias=False),
            keras.layers.BatchNormalization(),
            self.activation,
            keras.layers.Conv3D(filters, (3, 3, 3), strides=1,
                                padding="same", use_bias=False),
            keras.layers.BatchNormalization()]
        self.skip_layers = []
        if strides > 1:
            self.skip_layers = [
                keras.layers.Conv3D(filters, (1, 1, 1), strides=strides,
                                    padding="same", use_bias=False),
                keras.layers.BatchNormalization()]

    def call(self, inputs):
        Z = inputs
        for layer in self.main_layers:
            Z = layer(Z)
        skip_Z = inputs
        for layer in self.skip_layers:
            skip_Z = layer(skip_Z)
        return self.activation(Z + skip_Z)

    def get_config(self):
        base_config = super(ResidualUnitEncode, self).get_config()
        return base_config
class ResidualUnitDecode(keras.layers.Layer):
    def __init__(self, filters=1, strides=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        self.activation = keras.activations.get(activation)
        self.main_layers = [
            keras.layers.Conv3DTranspose(filters, (3, 3, 3), strides=1,
                                         padding="same", use_bias=False),
            keras.layers.BatchNormalization(),
            self.activation,
            keras.layers.Conv3DTranspose(filters, (3, 3, 3), strides=strides,
                                         padding="same", use_bias=False),
            keras.layers.BatchNormalization()]
        self.skip_layers = []
        if strides > 1:
            self.skip_layers = [
                keras.layers.Conv3DTranspose(filters, (3, 3, 3), strides=strides,
                                             padding="same", use_bias=False),
                keras.layers.BatchNormalization()]

    def call(self, inputs):
        Z = inputs
        for layer in self.main_layers:
            Z = layer(Z)
        skip_Z = inputs
        for layer in self.skip_layers:
            skip_Z = layer(skip_Z)
        return self.activation(Z + skip_Z)

    def get_config(self):
        base_config = super(ResidualUnitDecode, self).get_config()
        return base_config
def build_unet(image_shape, batch_size):
    inputs = keras.layers.Input(shape=image_shape, batch_size=batch_size)
    conv1 = keras.layers.Conv3D(64, (7, 7, 7), strides=(2, 2, 1), padding="same", use_bias=False, input_shape=image_shape)(inputs)
    conv1 = keras.layers.BatchNormalization()(conv1)
    conv1 = keras.layers.Activation("relu")(conv1)
    pool1 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv1)
    conv2 = ResidualUnitEncode(filters=128, strides=2)(pool1)
    pool2 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv2)
    conv3 = ResidualUnitEncode(filters=256, strides=2)(pool2)
    pool3 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv3)
    conv4 = ResidualUnitEncode(filters=512, strides=2)(pool3)
    pool4 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv4)
    conv5 = ResidualUnitEncode(filters=1024, strides=2)(pool4)
    drop5 = keras.layers.Dropout(0.5)(conv5)
    up6 = ResidualUnitDecode(filters=512, strides=2)(drop5)
    merge6 = keras.layers.concatenate([conv4, up6], axis=4)
    conv6 = ResidualUnitEncode(filters=512, strides=2)(merge6)
    conv6 = keras.layers.UpSampling3D(size=(2, 2, 2))(conv6)
    up7 = ResidualUnitDecode(filters=256, strides=2)(conv6)
    merge7 = keras.layers.concatenate([conv3, up7], axis=4)
    conv7 = ResidualUnitEncode(filters=256, strides=2)(merge7)
    conv7 = keras.layers.UpSampling3D(size=(2, 2, 2))(conv7)
    up8 = ResidualUnitDecode(filters=128, strides=2)(conv7)
    merge8 = keras.layers.concatenate([conv2, up8], axis=4)
    conv8 = ResidualUnitEncode(filters=128, strides=2)(merge8)
    conv8 = keras.layers.UpSampling3D(size=(2, 2, 2))(conv8)
    up9 = ResidualUnitDecode(filters=64, strides=2)(conv8)
    merge9 = keras.layers.concatenate([conv1, up9], axis=4)
    conv9 = ResidualUnitDecode(filters=64, strides=2)(merge9)
    conv10 = keras.layers.Conv3D(1, 1, strides=(1, 1, 2), activation="sigmoid")(conv9)
    model = keras.Model(inputs, conv10)
    model.compile(optimizer=keras.optimizers.Adam(lr=0.001), loss=dice_loss)
    model.summary()
    return model
Here is the code to run the training using KFold CV:
image_shape = [256, 256, 64, 2]

dataset = tf.data.TFRecordDataset('train.tfrecord').map(parse_record).batch(69)
nx = tf.compat.v1.data.make_one_shot_iterator(dataset)
x, y = nx.get_next()

x_test = x[55:69, ...]
y_test = y[55:69, ...]
x_train = x[0:54, ...]
y_train = y[0:54, ...]

kfold = KFold(n_splits=10, shuffle=True)
fold_no = 1
acc_per_fold = []
loss_per_fold = []
for train, test in kfold.split(x_train, y_train):
    model = build_unet(image_shape=image_shape, batch_size=1)

    early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss')
    model_file_name = './Fold_' + str(fold_no) + '_best_model.h5'
    model_checkpoint = keras.callbacks.ModelCheckpoint(model_file_name, monitor='val_loss')
    log_dir_name = './Fold_' + str(fold_no) + '_log_dir'
    tb = keras.callbacks.TensorBoard(log_dir_name)

    print('------------------------------------------------------------------------')
    print(f'Training for fold {fold_no} ...')

    train_id_rows = tf.constant(train.reshape(-1, 1))
    test_id_rows = tf.constant(test.reshape(-1, 1))
    x_train_train = tf.gather_nd(x_train, train_id_rows)
    y_train_train = tf.gather_nd(y_train, train_id_rows)
    x_train_test = tf.gather_nd(x_train, test_id_rows)
    y_train_test = tf.gather_nd(y_train, test_id_rows)

    history = model.fit(x_train_train, y_train_train, epochs=N_EPOCHS, callbacks=[tb, model_checkpoint, early_stopping], batch_size=1)
    scores = model.evaluate(x_train_test, y_train_test, verbose=0)

    acc_per_fold.append(scores[1] * 100)
    loss_per_fold.append(scores[0])
    fold_no = fold_no + 1
There are 69 total samples in the dataset, 54 used for the training/validation loop.
I am trying to run a U-Net to mask the image data with its labels. The data management part of the code went well; I am struggling with this code when I run unet.py. Execution throws a warning and then stops without producing any results, and I want to know whether it's a library version issue, as I didn't find anything similar. I have debugged the later lines and still have no clue. Execution stops at "Using TensorFlow backend." without giving any error and does not proceed to the training and model-saving phase. The directory structure has been checked and is in correct order.
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import array_to_img
import cv2
import numpy as np

from data import *
class myUnet(object):
    def __init__(self, img_rows=512, img_cols=512):
        self.img_rows = img_rows
        self.img_cols = img_cols

    def load_data(self):
        mydata = dataProcess(self.img_rows, self.img_cols)
        imgs_train, imgs_mask_train = mydata.load_train_data()
        imgs_test = mydata.load_test_data()
        return imgs_train, imgs_mask_train, imgs_test
    def get_unet(self):
        inputs = Input((self.img_rows, self.img_cols, 3))
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
        # print(conv1)
        conv1 = BatchNormalization()(conv1)
        print("conv1 shape:", conv1.shape)
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
        conv1 = BatchNormalization()(conv1)
        print("conv1 shape:", conv1.shape)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print("pool1 shape:", pool1.shape)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
        print("conv2 shape:", conv2.shape)
        conv2 = BatchNormalization()(conv2)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
        print("conv2 shape:", conv2.shape)
        conv2 = BatchNormalization()(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print("pool2 shape:", pool2.shape)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
        print("conv3 shape:", conv3.shape)
        conv3 = BatchNormalization()(conv3)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
        print("conv3 shape:", conv3.shape)
        conv3 = BatchNormalization()(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print("pool3 shape:", pool3.shape)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
        conv4 = BatchNormalization()(conv4)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
        conv4 = BatchNormalization()(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
        conv5 = BatchNormalization()(conv5)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
        conv5 = BatchNormalization()(conv5)
        drop5 = Dropout(0.5)(conv5)
        up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
        up6 = BatchNormalization()(up6)
        merge6 = concatenate([drop4, up6], axis=3)
        print(up6)
        print(merge6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
        print(conv6)
        conv6 = BatchNormalization()(conv6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
        print(conv6)
        conv6 = BatchNormalization()(conv6)
        up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv6))
        up7 = BatchNormalization()(up7)
        merge7 = concatenate([conv3, up7], axis=3)
        print(up7)
        print(merge7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
        conv7 = BatchNormalization()(conv7)
        print(conv7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
        print(conv7)
        conv7 = BatchNormalization()(conv7)
        up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv7))
        up8 = BatchNormalization()(up8)
        merge8 = concatenate([conv2, up8], axis=3)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
        conv8 = BatchNormalization()(conv8)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
        conv8 = BatchNormalization()(conv8)
        up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv8))
        up9 = BatchNormalization()(up9)
        merge9 = concatenate([conv1, up9], axis=3)
        print(up9)
        print(merge9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
        conv9 = BatchNormalization()(conv9)
        print(conv9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv9 = BatchNormalization()(conv9)
        print(conv9)
        conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv9 = BatchNormalization()(conv9)
        print("conv9 shape:", conv9.shape)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
        print(conv10)
        model = Model(inputs=inputs, outputs=conv10)
        model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
        return model
    def train(self):
        print("loading data")
        imgs_train, imgs_mask_train, imgs_test = self.load_data()
        print("loading data done")
        model = self.get_unet()
        print("got unet")
        model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', verbose=1, save_best_only=True)
        print('Fitting model...')
        model.fit(imgs_train, imgs_mask_train, batch_size=4, epochs=100, verbose=1,
                  validation_split=0.2, shuffle=True, callbacks=[model_checkpoint])
        print('predict test data')
        imgs_mask_test = model.predict(imgs_test, batch_size=1, verbose=1)
        np.save('./data/results/imgs_mask_test.npy', imgs_mask_test)

    def save_img(self):
        print("array to image")
        imgs = np.load('./data/results/imgs_mask_test.npy')
        piclist = []
        for line in open("./data/results/pic.txt"):
            line = line.strip()
            picname = line.split('/')[-1]
            piclist.append(picname)
        print(len(piclist))
        for i in range(imgs.shape[0]):
            path = "./data/results/" + piclist[i]
            img = imgs[i]
            img = array_to_img(img)
            img.save(path)
            cv_pic = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            cv_pic = cv2.resize(cv_pic, (1918, 1280), interpolation=cv2.INTER_CUBIC)
            binary, cv_save = cv2.threshold(cv_pic, 127, 255, cv2.THRESH_BINARY)
            cv2.imwrite(path, cv_save)

    def load_model_weights(self, model):
        model.load_weights('./data/unet.hdf5')
if __name__ == '__main__':
    print("going to create model")
    myunet = myUnet()
    print("model created.. going to retrieve model")
    model = myunet.get_unet()
    # model.summary()
    # plot_model(model, to_file='model.png')
    # myunet.load_model_weights(model)
    print("train model")
    myunet.train()
    print("save model")
    myunet.save_img()
WARNING:tensorflow:From unet.py:4: The name tf.keras.layers.CuDNNGRU is deprecated. Please use tf.compat.v1.keras.layers.CuDNNGRU instead.
WARNING:tensorflow:From unet.py:4: The name tf.keras.layers.CuDNNLSTM is deprecated. Please use tf.compat.v1.keras.layers.CuDNNLSTM instead.
Using TensorFlow backend.
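One observation worth checking (an assumption, not something confirmed above): the "Using TensorFlow backend." banner is printed when the standalone keras package is imported, while unet.py itself only uses tensorflow.keras, so the banner most likely comes from "from data import *". Mixing standalone Keras and tf.keras in one program is a common source of silent misbehaviour. A quick check in isolation:

import sys
import data  # if "Using TensorFlow backend." prints here, data.py pulls in standalone keras
print('keras' in sys.modules)  # True means the standalone keras package was imported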