I am building a reinforcement learning model and I am trying to use PReLU in my 2D conv model using TensorFlow.
Here is the code for the actor model.
Code:
from tensorflow.keras.layers import Conv2D, Input, MaxPool1D, concatenate, Lambda, Dense, Flatten
import tensorflow as tf
# activation = tf.keras.layers.LeakyReLU(alpha=0.5)
activation = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
def ActorNetwork(input_shape_A, input_shape_B, n_actions):
    input_layer_A = Input(shape=input_shape_A[1:], name="input_layer_A")
    input_layer_B = Input(shape=input_shape_B[1:], name="input_layer_B")
    Rescale = Lambda(lambda x: tf.divide(tf.subtract(x, tf.reduce_max(x)), tf.subtract(tf.reduce_max(x), tf.reduce_min(x))))(input_layer_A)
    Conv1 = Conv2D(32, 3, activation=activation, padding='same', name="Conv1")(Rescale)
    Conv2 = Conv2D(32, 3, activation=activation, padding='same', name="Conv2")(Conv1)
    Conv_pool_1 = Conv2D(32, 2, strides=2, activation='relu', padding='same', name="Conv_pool_1")(Conv2)
    Batchnorm_1 = tf.keras.layers.BatchNormalization(name='Batchnorm_1')(Conv_pool_1)
    Conv3 = Conv2D(32, 3, activation=activation, padding='same', name="Conv3")(Batchnorm_1)
    Conv4 = Conv2D(32, 3, activation=activation, padding='same', name="Conv4")(Conv3)
    Conv_pool_2 = Conv2D(32, 2, strides=2, activation='relu', padding='same', name="Conv_pool_2")(Conv4)
    Batchnorm_2 = tf.keras.layers.BatchNormalization(name='Batchnorm_2')(Conv_pool_2)
    Conv5 = Conv2D(64, 3, activation=activation, padding='same', name="Conv5")(Batchnorm_2)
    Conv6 = Conv2D(64, 3, activation=activation, padding='same', name="Conv6")(Conv5)
    Conv_pool_3 = Conv2D(64, 2, strides=2, activation='relu', padding='same', name="Conv_pool_3")(Conv6)
    Batchnorm_3 = tf.keras.layers.BatchNormalization(name='Batchnorm_3')(Conv_pool_3)
    Conv7 = Conv2D(64, 3, activation=activation, padding='same', name="Conv7")(Batchnorm_3)
    Conv8 = Conv2D(64, 3, activation=activation, padding='same', name="Conv8")(Conv7)
    Conv_pool_4 = Conv2D(64, 2, strides=2, activation='relu', padding='same', name="Conv_pool_4")(Conv8)
    Batchnorm_4 = tf.keras.layers.BatchNormalization(name='Batchnorm_4')(Conv_pool_4)
    Conv9 = Conv2D(128, 3, activation=activation, padding='same', name="Conv9")(Batchnorm_4)
    Conv10 = Conv2D(128, 3, activation=activation, padding='same', name="Conv10")(Conv9)
    Conv_pool_5 = Conv2D(128, 2, strides=2, activation='relu', padding='same', name="Conv_pool_5")(Conv10)
    Batchnorm_5 = tf.keras.layers.BatchNormalization(name='Batchnorm_5')(Conv_pool_5)
    Conv11 = Conv2D(128, 3, activation=activation, padding='same', name="Conv11")(Batchnorm_5)
    Conv12 = Conv2D(128, 3, activation=activation, padding='same', name="Conv12")(Conv11)
    Conv_pool_6 = Conv2D(128, 2, strides=2, activation='relu', padding='same', name="Conv_pool_6")(Conv12)
    Batchnorm_6 = tf.keras.layers.BatchNormalization(name='Batchnorm_6')(Conv_pool_6)
    Conv_pool_7 = Conv2D(128, 1, strides=1, activation='relu', padding='same', name="Conv_pool_7")(Batchnorm_6)
    Conv_pool_8 = Conv2D(64, 1, strides=1, activation='relu', padding='same', name="Conv_pool_8")(Conv_pool_7)
    Conv_pool_9 = Conv2D(32, 1, strides=1, activation='relu', padding='same', name="Conv_pool_9")(Conv_pool_8)
    flatten = Flatten()(Conv_pool_9)
    Concat_2 = tf.keras.layers.concatenate([flatten, input_layer_B], axis=-1, name='Concat_2')
    fc1 = Dense(8194, activation='relu', name="fc1")(Concat_2)
    fc2 = Dense(4096, activation='relu', name="fc2")(fc1)
    fc3 = Dense(n_actions, activation='softmax', name="fc3")(fc2)
    return tf.keras.models.Model(inputs=[input_layer_A, input_layer_B], outputs=fc3, name="actor_model")
model=ActorNetwork((1,1000,4000,1),(1,2),3)
model.compile()
model.summary()
print(model([tf.random.uniform((1,1000,4000,1)),tf.random.uniform((1,2))]))
tf.keras.utils.plot_model(model, show_shapes=True)
It works fine with LeakyReLU, but when I use PReLU it throws an error related to dimensions. I don't understand it.
Error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-17-0a596da4bc68> in <module>
131
132
--> 133 model=ActorNetwork((1,1000,4000,1),(1,2),3)
134 model.compile()
135 model.summary()
2 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs, op_def)
2011 except errors.InvalidArgumentError as e:
2012 # Convert to ValueError for backwards compatibility.
-> 2013 raise ValueError(e.message)
2014
2015 return c_op
ValueError: Exception encountered when calling layer "p_re_lu_10" (type PReLU).
Dimensions must be equal, but are 1000 and 500 for '{{node Conv3/p_re_lu_10/mul}} = Mul[T=DT_FLOAT](Conv3/p_re_lu_10/Neg, Conv3/p_re_lu_10/Relu_1)' with input shapes: [1000,4000,32], [?,500,2000,32].
Call arguments received:
• inputs=tf.Tensor(shape=(None, 500, 2000, 32), dtype=float32)
What am I doing wrong here?
The PReLU activation function maintains a learnable parameter alpha that has the same shape as the input of the function. You can read more in the documentation.
You need to define a new layer each time you want to use that activation function, i.e.:
Conv1 = Conv2D(32, 3, activation=None, padding='same', name="Conv1")(Rescale)
Conv1_p_relu = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))(Conv1)
Conv2 = Conv2D(32, 3, activation=None, padding='same', name="Conv2")(Conv1_p_relu)
Conv2_p_relu = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))(Conv2)
Conv_pool_1 = Conv2D(32, 2, strides=2, activation='relu', padding='same', name="Conv_pool_1")(Conv2_p_relu)
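As a side note (my own illustration, not part of the original answer): if you want one learnable slope per channel rather than one per spatial position, PReLU accepts a shared_axes argument, which also keeps alpha's shape independent of the feature-map height and width. A minimal sketch:
import tensorflow as tf

# Hypothetical helper so each call site gets a fresh PReLU instance;
# reusing one instance ties its weights across layers, which is what
# caused the shape clash above. shared_axes=[1, 2] shares alpha over
# height and width, leaving one learnable slope per channel.
def make_prelu():
    return tf.keras.layers.PReLU(
        alpha_initializer=tf.initializers.constant(0.25),
        shared_axes=[1, 2])

x = tf.random.uniform((1, 500, 2000, 32))
y = make_prelu()(x)
print(y.shape)  # (1, 500, 2000, 32); alpha has shape (1, 1, 32)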
I want to train an autoencoder for the purpose of GPR investigations.
The input data dimension is 149x8. A deep (dense) autoencoder works fine:
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras import layers, optimizers

input_img = Input(shape=(8,))
encoded1 = Dense(8, activation='relu')(input_img)
encoded2 = Dense(4, activation='relu')(encoded1)
encoded3 = Dense(2, activation='relu' )(encoded2)
decoded1 = Dense(2, activation='relu' )(encoded3)
decoded2 = Dense(4, activation='relu')(decoded1)
decoded3 = Dense(8, activation='relu' )(decoded2)
decoded = Dense(8, activation='linear')(decoded3)
autoencoder = Model(input_img, decoded)
sgd = optimizers.Adam(lr=0.001)
autoencoder.compile(optimizer=sgd, loss='mse')
autoencoder.summary()
But when I try a convolutional autoencoder on the same input,
it gives the error `ValueError: Input 0 is incompatible with layer conv2d_1: expected ndim=4, found ndim=2`.
Can anybody suggest how to overcome this problem?
My code is
input_img = Input(shape=(8,))
x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = layers.MaxPooling2D((2, 2), padding='same')(x)
x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = layers.MaxPooling2D((2, 2), padding='same')(x)
x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = layers.MaxPooling2D((2, 2), padding='same')(x)
x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = layers.UpSampling2D((2, 2))(x)
x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = layers.UpSampling2D((2, 2))(x)
x = layers.Conv2D(16, (3, 3), activation='relu')(x)
x = layers.UpSampling2D((2, 2))(x)
decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = Model(input_img, decoded)
sgd = optimizers.Adam(lr=0.001)
autoencoder.compile(optimizer=sgd, loss='mse')
autoencoder.summary()
Wrong input shape:
This is because you are passing an input shape of (8,), plus one extra dimension added by TensorFlow for the batch size, so the error message says it found ndim=2, while Conv2D expects min_ndim=4: 3 dimensions for the image (height, width, channels) plus 1 for the batch size, e.g.
input_shape=(number_of_rows, 28, 28, 1)
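A minimal sketch of one way to fix this for the 149x8 data (my own assumption: each of the 149 samples is a one-dimensional signal of length 8, so Conv1D with an explicit channel axis is the natural fit; the variable names are illustrative):
import numpy as np
from tensorflow.keras import layers
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

data = np.random.rand(149, 8)   # stand-in for the real GPR data
data = data.reshape(-1, 8, 1)   # (149, 8, 1): 8 steps, 1 channel

input_img = Input(shape=(8, 1))
x = layers.Conv1D(16, 3, activation='relu', padding='same')(input_img)
x = layers.MaxPooling1D(2, padding='same')(x)           # (4, 16)
x = layers.Conv1D(8, 3, activation='relu', padding='same')(x)
encoded = layers.MaxPooling1D(2, padding='same')(x)     # (2, 8)
x = layers.Conv1D(8, 3, activation='relu', padding='same')(encoded)
x = layers.UpSampling1D(2)(x)                           # (4, 8)
x = layers.Conv1D(16, 3, activation='relu', padding='same')(x)
x = layers.UpSampling1D(2)(x)                           # (8, 16)
decoded = layers.Conv1D(1, 3, activation='linear', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.summary()
autoencoder.fit(data, data, epochs=10, batch_size=16)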
I am trying to do a multi-class classification project with a CNN. My issue is that I get good training accuracy, but the model does not predict well on validation data. I have introduced L2 regularization, but it is not generalizing well; I also tried different L2 regularization values (1e-3, 1e-4).
Here are my accuracy and loss graphs.
Topology:
import tensorflow
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

inputs = keras.Input(shape=(512, 512, 3), name="img")
x = Conv2D(32, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(inputs)
x = BatchNormalization()(x)
x1 = Activation('relu')(x)
x2 = Conv2D(32, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x1)
x = BatchNormalization()(x2)
x = Activation('relu')(x)
x3 = Conv2D(32, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x3)
x = tensorflow.keras.layers.add([x, x1])  # ==> shortcut
x = Activation('relu')(x)
x4 = Conv2D(64, kernel_size=(3,3), strides=(2,2), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x4)
x = Activation('relu')(x)
x5 = Conv2D(64, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x5)
x = Activation('relu')(x)
x6 = Conv2D(64, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x6)
x = tensorflow.keras.layers.add([x, x4])  # ==> shortcut
x = Activation('relu')(x)
x7 = Conv2D(128, kernel_size=(3,3), strides=(2,2), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x7)
x = Activation('relu')(x)
x8 = Conv2D(128, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x8)
x = Activation('relu')(x)
x9 = Conv2D(128, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x9)
x = tensorflow.keras.layers.add([x, x7])  # ==> shortcut
x = Activation('relu')(x)
x10 = Conv2D(256, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x10)
x = Activation('relu')(x)
x11 = Conv2D(256, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x11)
x = Activation('relu')(x)
x12 = Conv2D(256, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x12)
x = tensorflow.keras.layers.add([x, x10])  # ==> shortcut
x = Activation('relu')(x)
x13 = Conv2D(512, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x13)
x = Activation('relu')(x)
x14 = Conv2D(512, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x14)
x = Activation('relu')(x)
x15 = Conv2D(512, kernel_size=(3,3), strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x)
x = BatchNormalization()(x15)
x = tensorflow.keras.layers.add([x, x13])  # ==> shortcut
x = Activation('relu')(x)
x = Flatten()(Conv2D(1, kernel_size=1, strides=(1,1), kernel_regularizer=l2(1e-5), padding='same')(x))
x = layers.Dropout(0.3)(x)
outputs = Dense(4, activation='softmax', kernel_initializer='he_normal')(x)
model = Model(inputs, outputs)
model.summary()
I tried with different filters and with decreasing/increasing the number of layers. Is this issue because of overfitting? Any suggestions on what I can improve so that I get smoother curves and good predictions?
You could try putting dropout after the Conv2D layers too; that should help with some of the overfitting. A sketch of both ideas follows.
You could also decrease the learning rate of the optimizer, so that it doesn't overshoot the optimum.
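A minimal sketch of both suggestions (a toy model of my own, not the asker's topology; SpatialDropout2D is one common way to apply dropout between conv layers, and the learning rate is just an illustrative value):
from tensorflow.keras import layers, models, optimizers

# Toy model showing the pattern: dropout after a conv block,
# plus a smaller learning rate. Sizes are placeholders.
inputs = layers.Input(shape=(512, 512, 3))
x = layers.Conv2D(32, (3, 3), padding='same')(inputs)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.SpatialDropout2D(0.2)(x)   # drops whole feature maps
x = layers.GlobalAveragePooling2D()(x)
outputs = layers.Dense(4, activation='softmax')(x)

model = models.Model(inputs, outputs)
model.compile(optimizer=optimizers.Adam(learning_rate=1e-4),  # smaller lr
              loss='categorical_crossentropy', metrics=['accuracy'])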
Should help :)
I am trying to run a U-Net to mask the image data with its labels. The data management part of the code went well, but I am struggling with the code below when I run unet.py: it executes, throws a warning, and then stops without producing any results. I wanted to know if it is a library version issue, as I didn't find anything similar to this. I have debugged the later lines but still have no clue. Execution stops at "Using TensorFlow backend." without giving any error, and it does not proceed to the model training and saving phases. The directory structure has been checked and is in order.
import numpy as np
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import array_to_img
import cv2
from data import *
class myUnet(object):
    def __init__(self, img_rows=512, img_cols=512):
        self.img_rows = img_rows
        self.img_cols = img_cols

    def load_data(self):
        mydata = dataProcess(self.img_rows, self.img_cols)
        imgs_train, imgs_mask_train = mydata.load_train_data()
        imgs_test = mydata.load_test_data()
        return imgs_train, imgs_mask_train, imgs_test
    def get_unet(self):
        inputs = Input((self.img_rows, self.img_cols, 3))
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
        # print(conv1)
        conv1 = BatchNormalization()(conv1)
        print("conv1 shape:", conv1.shape)
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
        conv1 = BatchNormalization()(conv1)
        print("conv1 shape:", conv1.shape)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print("pool1 shape:", pool1.shape)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
        print("conv2 shape:", conv2.shape)
        conv2 = BatchNormalization()(conv2)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
        print("conv2 shape:", conv2.shape)
        conv2 = BatchNormalization()(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print("pool2 shape:", pool2.shape)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
        print("conv3 shape:", conv3.shape)
        conv3 = BatchNormalization()(conv3)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
        print("conv3 shape:", conv3.shape)
        conv3 = BatchNormalization()(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print("pool3 shape:", pool3.shape)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
        conv4 = BatchNormalization()(conv4)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
        conv4 = BatchNormalization()(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
        conv5 = BatchNormalization()(conv5)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
        conv5 = BatchNormalization()(conv5)
        drop5 = Dropout(0.5)(conv5)
        up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
        up6 = BatchNormalization()(up6)
        merge6 = concatenate([drop4, up6], axis=3)
        print(up6)
        print(merge6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
        print(conv6)
        conv6 = BatchNormalization()(conv6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
        print(conv6)
        conv6 = BatchNormalization()(conv6)
        up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
        up7 = BatchNormalization()(up7)
        merge7 = concatenate([conv3, up7], axis=3)
        print(up7)
        print(merge7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
        conv7 = BatchNormalization()(conv7)
        print(conv7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
        print(conv7)
        conv7 = BatchNormalization()(conv7)
        up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
        up8 = BatchNormalization()(up8)
        merge8 = concatenate([conv2, up8], axis=3)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
        conv8 = BatchNormalization()(conv8)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
        conv8 = BatchNormalization()(conv8)
        up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
        up9 = BatchNormalization()(up9)
        merge9 = concatenate([conv1, up9], axis=3)
        print(up9)
        print(merge9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
        conv9 = BatchNormalization()(conv9)
        print(conv9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv9 = BatchNormalization()(conv9)
        print(conv9)
        conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv9 = BatchNormalization()(conv9)
        print("conv9 shape:", conv9.shape)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
        print(conv10)
        model = Model(inputs=inputs, outputs=conv10)
        model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
        return model
    def train(self):
        print("loading data")
        imgs_train, imgs_mask_train, imgs_test = self.load_data()
        print("loading data done")
        model = self.get_unet()
        print("got unet")
        model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', verbose=1, save_best_only=True)
        print('Fitting model...')
        model.fit(imgs_train, imgs_mask_train, batch_size=4, epochs=100, verbose=1,
                  validation_split=0.2, shuffle=True, callbacks=[model_checkpoint])
        print('predict test data')
        imgs_mask_test = model.predict(imgs_test, batch_size=1, verbose=1)
        np.save('./data/results/imgs_mask_test.npy', imgs_mask_test)
    def save_img(self):
        print("array to image")
        imgs = np.load('./data/results/imgs_mask_test.npy')
        piclist = []
        for line in open("./data/results/pic.txt"):
            line = line.strip()
            picname = line.split('/')[-1]
            piclist.append(picname)
        print(len(piclist))
        for i in range(imgs.shape[0]):
            path = "./data/results/" + piclist[i]
            img = imgs[i]
            img = array_to_img(img)
            img.save(path)
            cv_pic = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            cv_pic = cv2.resize(cv_pic, (1918, 1280), interpolation=cv2.INTER_CUBIC)
            binary, cv_save = cv2.threshold(cv_pic, 127, 255, cv2.THRESH_BINARY)
            cv2.imwrite(path, cv_save)

    def load_model_weights(self, model):
        model.load_weights('./data/unet.hdf5')
if __name__ == '__main__':
    print("going to create model")
    myunet = myUnet()
    print("model created.. going to retrieve model")
    model = myunet.get_unet()
    # model.summary()
    # plot_model(model, to_file='model.png')
    # myunet.load_model_weights(model)
    print("train model")
    myunet.train()
    print("save model")
    myunet.save_img()
WARNING:tensorflow:From unet.py:4: The name tf.keras.layers.CuDNNGRU is deprecated. Please use tf.compat.v1.keras.layers.CuDNNGRU instead.
WARNING:tensorflow:From unet.py:4: The name tf.keras.layers.CuDNNLSTM is deprecated. Please use tf.compat.v1.keras.layers.CuDNNLSTM instead.
Using TensorFlow backend.
I'm getting a weird error when I try to create a network using an upsampling layer and manually set the interpolation keyword to 'bilinear'.
If I leave it out and go with the default of 'nearest' (nearest neighbour), it works fine.
Does anyone know what's up?
Code for the model; the error is thrown at layer 'up1':
def build_model(self):
    chnl4_input = Input(shape=(368, 256, 4))
    chnl3_input = Input(shape=(736, 512, 3))
    conv1 = Conv2D(26, self.kernel_size, activation='relu', padding='same')(chnl4_input)
    conv2 = Conv2D(26, self.kernel_size, strides=(2, 2), activation='relu', padding='same')(conv1)
    conv5 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv2)
    conv6 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv5)
    up1 = concatenate([UpSampling2D(size=(2, 2), interpolation='bilinear')(conv6), conv1], axis=-1)
    conv7 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(up1)
    conv8 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv7)
    conv9 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv8)
    conv11 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv9)
    conv12 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv11)
    up3 = concatenate([UpSampling2D(size=(2, 2), interpolation='bilinear')(conv12), chnl3_input], axis=-1)
    conv13 = Conv2D(67, self.kernel_size, activation='relu', padding='same')(up3)
    conv14 = Conv2D(67, self.kernel_size, activation='relu', padding='same')(conv13)
    conv15 = Conv2D(32, self.kernel_size, activation='relu', padding='same')(conv14)
    conv16 = Conv2D(3, self.kernel_size, activation='relu', padding='same')(conv15)
    out = conv16
    self.model = Model(inputs=[chnl4_input, chnl3_input], outputs=[out])
    self.model.compile(optimizer=self.optimizer_func, loss=self.loss_func)
    self.model.name = 'UNET'
    return self.model
Error: TypeError: ('Keyword argument not understood:', 'interpolation')
~/MastersWork/Fergal/Scripts/models.py in build_model(self)
29 conv6 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(conv5)
30
---> 31 up1 = concatenate([UpSampling2D(size=(2, 2), interpolation='bilinear')(conv6), conv1], axis=-1)
32 conv7 = Conv2D(64, self.kernel_size, activation='relu', padding='same')(up1)
33
~/anaconda3/envs/rhys_tensorflow/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~/anaconda3/envs/rhys_tensorflow/lib/python3.6/site-packages/keras/layers/convolutional.py in __init__(self, size, data_format, **kwargs)
1804 @interfaces.legacy_upsampling2d_support
1805 def __init__(self, size=(2, 2), data_format=None, **kwargs):
-> 1806 super(UpSampling2D, self).__init__(**kwargs)
1807 self.data_format = conv_utils.normalize_data_format(data_format)
1808 self.size = conv_utils.normalize_tuple(size, 2, 'size')
~/anaconda3/envs/rhys_tensorflow/lib/python3.6/site-packages/keras/engine/topology.py in __init__(self, **kwargs)
291 for kwarg in kwargs:
292 if kwarg not in allowed_kwargs:
--> 293 raise TypeError('Keyword argument not understood:', kwarg)
294 name = kwargs.get('name')
295 if not name:
For reference, here is the Keras documentation page for UpSampling2D:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/UpSampling2D
Here is a workaround for bilinear upsampling, using a Lambda layer and tf.image.resize_bilinear; it works fine on TF 1.12.0:
import tensorflow as tf
from keras import backend as K
from keras.layers import Lambda

def bilinear_upsample(tensor, size):
    # tf.image.resize_bilinear is the TF 1.x op; size is the target (height, width)
    y = tf.image.resize_bilinear(images=tensor, size=size)
    return y

# input_tensor is the feature map to upsample, scale the integer upsampling factor
dims = K.int_shape(input_tensor)
y_scaled = Lambda(lambda x: bilinear_upsample(tensor=x, size=(dims[1] * scale, dims[2] * scale)))(input_tensor)
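For what it's worth, the traceback above comes from the standalone keras package; newer versions of tf.keras accept the keyword directly (a quick check of my own, assuming TF 1.13+ or 2.x):
import tensorflow as tf

# UpSampling2D in recent tf.keras takes interpolation='bilinear' directly.
x = tf.random.uniform((1, 184, 128, 64))
up = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)
print(up.shape)  # (1, 368, 256, 64)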