Test of functional API fails - tensorflow2.0

For testing, I tried to convert the generator from the TensorFlow DCGAN tutorial (https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/generative/dcgan.ipynb#scrollTo=6bpTcDqoLWjY) to the functional API, but it does not work:
ValueError: setting an array element with a sequence.
Does someone know what I am doing wrong? I replaced the generator code with:
def make_generator_model():
    inputs = tf.keras.Input(shape=(100,))
    l = layers.Dense(7*7*256, use_bias=False)(inputs)
    #l1 = layers.BatchNormalization(l)
    l2 = layers.LeakyReLU(l)
    l3 = layers.Reshape((7, 7, 256))(l2)
    l4 = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(l3)
    #l5 = layers.BatchNormalization()(l4)
    l6 = layers.LeakyReLU()(l4)
    l7 = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(l6)
    #l8 = layers.BatchNormalization()(l7)
    l9 = layers.LeakyReLU()(l7)
    l10 = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(l9)
    return tf.keras.Model(inputs=inputs, outputs=l10)

The error comes from the missing () after LeakyReLU. Writing layers.LeakyReLU(l) passes the tensor as the layer's alpha constructor argument instead of instantiating the layer and applying it, which produces the "setting an array element with a sequence" error; it must be layers.LeakyReLU()(l). The working code is:
def make_generator_model():
    inputs = tf.keras.Input(shape=(100,))
    l = layers.Dense(7*7*256, use_bias=False)(inputs)
    l1 = layers.BatchNormalization()(l)
    l2 = layers.LeakyReLU()(l1)
    l3 = layers.Reshape((7, 7, 256))(l2)
    l4 = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(l3)
    l5 = layers.BatchNormalization()(l4)
    l6 = layers.LeakyReLU()(l5)
    l7 = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(l6)
    l8 = layers.BatchNormalization()(l7)
    l9 = layers.LeakyReLU()(l8)
    l10 = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(l9)
    return tf.keras.Model(inputs=inputs, outputs=l10)
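The general pattern in the functional API, as a minimal illustration (the variable names here are ours, not from the post): constructing a layer and applying it to a tensor are two separate calls.

act = layers.LeakyReLU()  # step 1: construct the layer object (optionally passing alpha=...)
y = act(inputs)           # step 2: call it on a tensor to wire it into the graph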


implementing unpooling layer in u-net, InvalidArgumentError is occurred

I am doing segmentation on the EM dataset, with Keras 2.3.1 and TensorFlow 2, in Google Colab.
Here is my code. This is a U-net.
def unpool(pool, ind, ksize=[1, 2, 2, 1], scope='unpool'):
    """
    Unpooling layer after max_pool_with_argmax.
    Args:
        pool: max pooled output tensor
        ind: argmax indices
        ksize: ksize is the same as for the pool
    Return:
        ret: unpooling tensor
    """
    with tf.compat.v1.variable_scope(scope):
        input_shape = tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.reduce_prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]
        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                 shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b1 = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b1, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, output_shape)
        set_input_shape = pool.get_shape()
        set_output_shape = [set_input_shape[0], set_input_shape[1] * ksize[1], set_input_shape[2] * ksize[2], set_input_shape[3]]
        ret.set_shape(set_output_shape)
        #print(set_output_shape)
        return ret
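A hypothetical usage sketch (our example, under the assumption that unpool behaves as its docstring describes): pooling and then unpooling restores the original spatial size, with zeros everywhere except at the argmax positions.

x = tf.random.normal([1, 4, 4, 2])
p, idx = tf.nn.max_pool_with_argmax(input=x, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')
y = unpool(p, idx)  # y has shape (1, 4, 4, 2)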
I am also using ImageDataGenerator in Keras.
inputs = Input(shape=(160, 160, 1))
# encoder
c1 = layers.BatchNormalization()(inputs)
c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
#p1 = layers.MaxPooling2D((2, 2))(c1) # 80 80
p1, argmax1 = tf.nn.max_pool_with_argmax(input=c1, ksize=(1,2,2,1), strides=(1,2,2,1), padding='SAME')
c2 = layers.BatchNormalization()(p1)
c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
#p2 = layers.MaxPooling2D((2, 2))(c2) # 40 40
p2, argmax2 = tf.nn.max_pool_with_argmax(input=c2, ksize=(1,2,2,1), strides=(1,2,2,1), padding='SAME')
c3 = layers.BatchNormalization()(p2)
c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
#p3 = layers.MaxPooling2D((2, 2))(c3) # 20 20
p3, argmax3 = tf.nn.max_pool_with_argmax(input=c3, ksize=(1,2,2,1), strides=(1,2,2,1), padding='SAME')
c4 = layers.BatchNormalization()(p3)
c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4)
c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4)
#p4 = layers.MaxPooling2D(pool_size=(2, 2))(c4) # 10 10
p4, argmax4 = tf.nn.max_pool_with_argmax(input=c4, ksize=(1,2,2,1), strides=(1,2,2,1), padding='SAME')
c5 = layers.BatchNormalization()(p4)
c5 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c5)
c5 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c5)
#p5 = layers.MaxPooling2D(pool_size=(2, 2))(c5) # 5 5
p5, argmax5 = tf.nn.max_pool_with_argmax(input=c5, ksize=(1,2,2,1), strides=(1,2,2,1), padding='SAME')
c55 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(p5)
c55 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c55)
u6 = layers.concatenate([unpool(c55, argmax5), c5])
c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(u6)
c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c6)
u71 = layers.concatenate([unpool(c6,argmax4), c4])
c71 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u71)
c61 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c71)
u7 = layers.concatenate([unpool(c61,argmax3), c3])
c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u7)
c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c7)
u8 = layers.concatenate([unpool(c7,argmax2), c2])
c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u8)
c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c8)
u9 = layers.concatenate([unpool(c8,argmax1), c1], axis=3)
c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(u9)
c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c9)
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
The network builds fine, but when I call fit, this error occurs:
InvalidArgumentError: Input to reshape is a tensor with 12800 values, but the requested shape has 25600
[[node functional_33/tf_op_layer_Reshape_393/Reshape_393 (defined at :4) ]] [Op:__inference_train_function_56708]
I don't know why this error occurs. Please, some help.
As the error suggests, the model requires an input image size of 160x160 = 25600 values, but the images fed to the model are not of the required size. If you resize all input images to 160x160x1 before fitting, it should work.
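A minimal sketch of that resize step (assuming images is an array of shape (num_samples, height, width, 1); the variable name is ours):

import tensorflow as tf

images_resized = tf.image.resize(images, size=(160, 160))  # bilinear interpolation by default
# images_resized has shape (num_samples, 160, 160, 1), matching the model's Input layer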

model does not generalize in test data using U-net

I'm working on multi-class segmentation of medical images using U-net. I have a small dataset of 681 samples and 681 ground-truth (GT) masks.
inputs = tf.keras.layers.Input((IMG_WIDHT, IMG_HEIGHT, IMG_CHANNELS))
smooth = 1.
s = tf.keras.layers.Lambda(lambda x: x / 255)(inputs)
# kernel_initializer='he_normal': start with sensible initial weight values
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
c1 = tf.keras.layers.Dropout(0.1)(c1)
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
p1 = tf.keras.layers.MaxPool2D((2, 2))(c1)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
c2 = tf.keras.layers.Dropout(0.1)(c2)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
p2 = tf.keras.layers.MaxPool2D((2, 2))(c2)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
c3 = tf.keras.layers.Dropout(0.1)(c3)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
p3 = tf.keras.layers.MaxPool2D((2, 2))(c3)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
c4 = tf.keras.layers.Dropout(0.1)(c4)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
p4 = tf.keras.layers.MaxPool2D((2, 2))(c4)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
c5 = tf.keras.layers.Dropout(0.1)(c5)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
u6 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = tf.keras.layers.concatenate([u6, c4])
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
c6 = tf.keras.layers.Dropout(0.2)(c6)
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
u7 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = tf.keras.layers.concatenate([u7, c3])
c7 = tf.keras.layers.Conv2D(64, (2, 2), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
c7 = tf.keras.layers.Dropout(0.2)(c7)
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
u8 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = tf.keras.layers.concatenate([u8, c2])
c8 = tf.keras.layers.Conv2D(32, (2, 2), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
c8 = tf.keras.layers.Dropout(0.1)(c8)
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
u9 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
u9 = tf.keras.layers.concatenate([u9, c1], axis=3)
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
c9 = tf.keras.layers.Dropout(0.1)(c9)
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
outputs = tf.keras.layers.Conv2D(12, (1, 1), activation='softmax')(c9)
For the loss I'm using a weighted categorical cross-entropy:
def weighted_categorical_crossentropy(weights):
    # weights = [0.9, 0.05, 0.04, 0.01]
    def wcce(y_true, y_pred):
        Kweights = K.constant(weights)
        if not K.is_tensor(y_pred):
            y_pred = K.constant(y_pred)
        y_true = K.cast(y_true, y_pred.dtype)
        return K.categorical_crossentropy(y_true, y_pred) * K.sum(y_true * Kweights, axis=-1)
    return wcce
losse = weighted_categorical_crossentropy(poids)
Hyperparameters:
cc = tf.keras.optimizers.Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, amsgrad=False)
model.compile(optimizer=cc, loss=losse, metrics=['categorical_accuracy'])
history = model.fit(X_train, Y_train, validation_split=0.18, batch_size=1,epochs = 50)
558/558 [==============================] - 333s 597ms/sample - loss: 0.0281 - categorical_accuracy: 0.9539 - val_loss: 0.2262 -
I'm confused by this result, because I found papers that work with the same dataset size and get better results.
I tried modifying the dropout from 0.1 to 0.4 and from 0.2 to 0.5 and got the same result.
I also tried adding regularization at the softmax layer and before the softmax layer, and the result was a high loss.
Data augmentation does not work for me, since I must preserve the range of pixel values, and using the Keras generator changes the pixel range of my GT.
So my question is: why does my model not generalize to the test data?
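On the augmentation point, a hedged workaround sketch (our example, not from the question; everything besides X_train and Y_train is ours): apply the same geometric transform to images and masks through a shared seed, then snap the interpolated mask values back to the original labels so the GT range is preserved.

import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

aug = dict(rotation_range=10, width_shift_range=0.05, height_shift_range=0.05)
image_gen = ImageDataGenerator(**aug).flow(X_train, batch_size=1, seed=42)
mask_gen = ImageDataGenerator(**aug).flow(Y_train, batch_size=1, seed=42)

def paired_batches(image_gen, mask_gen):
    # identical seeds keep the two generators' random transforms in sync
    for x_batch, y_batch in zip(image_gen, mask_gen):
        yield x_batch, np.rint(y_batch)  # restore integer label values after interpolation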

How to solve "No Algorithm Worked" Keras Error?

I tried to develop an FCN-16 model in Keras and initialized the weights from a similar FCN-16 model.
def FCN8(nClasses, input_height=256, input_width=256):
    ## input_height and width must be divisible by 32 because maxpooling with filter size (2, 2) is applied 5 times,
    ## which makes the input_height and width 2^5 = 32 times smaller
    assert input_height % 32 == 0
    assert input_width % 32 == 0
    IMAGE_ORDERING = "channels_last"
    img_input = Input(shape=(input_height, input_width, 3))  ## Assume 224,224,3
    ## Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_1', data_format=IMAGE_ORDERING)(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool', data_format=IMAGE_ORDERING)(x)
    f1 = x
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool', data_format=IMAGE_ORDERING)(x)
    f2 = x
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool', data_format=IMAGE_ORDERING)(x)
    pool3 = x
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3', data_format=IMAGE_ORDERING)(x)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool', data_format=IMAGE_ORDERING)(x)  ## (None, 14, 14, 512)
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1', data_format=IMAGE_ORDERING)(pool4)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3', data_format=IMAGE_ORDERING)(x)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool', data_format=IMAGE_ORDERING)(x)
    n = 4096
    o = Conv2D(n, (7, 7), activation='relu', padding='same', name="fc6", data_format=IMAGE_ORDERING)(pool5)
    conv7 = Conv2D(n, (1, 1), activation='relu', padding='same', name="fc7", data_format=IMAGE_ORDERING)(o)
    conv7 = Conv2D(nClasses, (1, 1), activation='relu', padding='same', name="conv7_1", data_format=IMAGE_ORDERING)(conv7)
    conv7_4 = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(2, 2), data_format=IMAGE_ORDERING)(conv7)
    pool411 = Conv2D(nClasses, (1, 1), activation='relu', padding='same', name="pool4_11", use_bias=False, data_format=IMAGE_ORDERING)(pool4)
    o = Add(name="add")([pool411, conv7_4])
    o = Conv2DTranspose(nClasses, kernel_size=(16, 16), strides=(16, 16), use_bias=False, data_format=IMAGE_ORDERING)(o)
    o = Activation('softmax')(o)
    GDI = Model(img_input, o)
    GDI.load_weights(Model_Weights_path)  # Model_Weights_path is assumed to be defined elsewhere
    model = Model(img_input, o)
    return model
Then I did a train/test split and tried to run the model:
from keras import optimizers
sgd = optimizers.SGD(lr=1E-2, momentum=0.91,decay=5**(-4), nesterov=True)
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])  # note: the string 'sgd' uses a default SGD and ignores the custom optimizer defined above
hist1 = model.fit(X_train,y_train,validation_data=(X_test,y_test),batch_size=32,epochs=1000,verbose=2)
model.save("/content/drive/My Drive/HCI_prep/new.h5")
But this code throws an error in the first epoch:
NotFoundError: 2 root error(s) found.
(0) Not found: No algorithm worked!
[[{{node pool4_11_3/Conv2D}}]]
[[loss_4/mul/_629]]
(1) Not found: No algorithm worked!
[[{{node pool4_11_3/Conv2D}}]]
0 successful operations.
0 derived errors ignored.
Add the following to your code:
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
Then restart the Python kernel.
Had the same issue; padding='same' for MaxPooling didn't work for me.
I changed the color_mode parameter in the train and test generators from 'rgb' to 'grayscale', and then it worked.
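A minimal sketch of that change (the directory path and target size are hypothetical):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    'data/train', target_size=(224, 224), color_mode='grayscale')  # was color_mode='rgb'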
This worked for me:
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
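If more than one GPU is visible, a variant of the same idea (our assumption, not part of the original answer) enables memory growth on all of them:

for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)  # must run before the GPUs are initialized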
In my case, this was solved by ending all processes, that still allocated memory on one of the GPUs. Apparently, one of them did not finish (correctly). I did not have to change any code.
My problem was that I called the model with an input_shape of (?,28,28,1) and later called it with (?,28,28,3).
import tensorflow.keras
from tensorflow.keras.models import *
from tensorflow.keras.layers import *  # Input, Conv2D, MaxPooling2D, ZeroPadding2D, Dropout, Conv2DTranspose, Add, Activation, Cropping2D

IMAGE_ORDERING = 'channels_last'

# take vgg-16 pretrained model from "https://github.com/fchollet/deep-learning-models" here
pretrained_url = "https://github.com/fchollet/deep-learning-models/" \
                 "releases/download/v0.1/" \
                 "vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
pretrained = 'imagenet'  # 'imagenet' if weights need to be initialized!

"""
Function Name: get_vgg_encoder()
Functionalities: This function defines the VGG encoder part of the FCN network
and initializes this encoder part with VGG pretrained weights.
Parameter: input_height=224, input_width=224, pretrained=pretrained
Returns: final layer of every block as f1, f2, f3, f4, f5
"""

def get_vgg_encoder(input_height=224, input_width=224, pretrained=pretrained):
    pad = 1
    # heights and widths must be divisible by 32, for fcn
    assert input_height % 32 == 0
    assert input_width % 32 == 0
    img_input = Input(shape=(input_height, input_width, 3))
    # Unlike the base paper, stride=1 is not set explicitly here, because
    # Keras uses stride=1 by default
    x = ZeroPadding2D((pad, pad), data_format=IMAGE_ORDERING)(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='valid', name='block1_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool', data_format=IMAGE_ORDERING)(x)
    f1 = x
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool', data_format=IMAGE_ORDERING)(x)
    f2 = x
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool', data_format=IMAGE_ORDERING)(x)
    x = Dropout(0.5)(x)
    f3 = x
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool', data_format=IMAGE_ORDERING)(x)
    f4 = x
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool', data_format=IMAGE_ORDERING)(x)
    # x = Dropout(0.5)(x)
    f5 = x
    # Check if weights are initialized; the model is learning!
    if pretrained == 'imagenet':
        VGG_Weights_path = tensorflow.keras.utils.get_file(
            pretrained_url.split("/")[-1], pretrained_url)
        Model(img_input, x).load_weights(VGG_Weights_path)
    return img_input, [f1, f2, f3, f4, f5]

"""
Function Name: fcn_16()
Functionalities: This function defines the Fully Convolutional part of the FCN network
and adds skip connections to build the FCN-16 network.
Parameter: n_classes, encoder=get_vgg_encoder, input_height=224, input_width=224
Returns: model
"""

def fcn_16(n_classes, encoder=get_vgg_encoder, input_height=224, input_width=224):
    # Take levels from the base model, i.e. vgg
    img_input, levels = encoder(input_height=input_height, input_width=input_width)
    [f1, f2, f3, f4, f5] = levels
    o = f5
    # fc6
    o = Conv2D(4096, (7, 7), activation='relu', padding='same', data_format=IMAGE_ORDERING)(o)
    o = Dropout(0.5)(o)
    # fc7
    o = Conv2D(4096, (1, 1), activation='relu', padding='same', data_format=IMAGE_ORDERING)(o)
    o = Dropout(0.3)(o)
    conv7 = Conv2D(1, (1, 1), activation='relu', padding='same', name="score_sal", data_format=IMAGE_ORDERING)(o)
    conv7_4 = Conv2DTranspose(1, kernel_size=(4, 4), strides=(2, 2), padding='same', name="upscore_sal2",
                              use_bias=False, data_format=IMAGE_ORDERING)(conv7)
    pool411 = Conv2D(1, (1, 1), activation='relu', padding='same', name="score_pool4", data_format=IMAGE_ORDERING)(f4)
    # Add a crop layer; crop() is assumed to be defined elsewhere and trims the
    # larger tensor so both skip branches have matching spatial shapes
    o, o2 = crop(pool411, conv7_4, img_input)
    # add skip connection
    o = Add()([o, o2])
    # 16 x upsample
    o = Conv2DTranspose(n_classes, kernel_size=(32, 32), strides=(16, 16), use_bias=False, data_format=IMAGE_ORDERING)(o)
    # crop layer
    ## Caffe has a crop layer that takes o and img_input as arguments, computes their size difference and crops.
    ## But Keras takes the cropping as a tuple; I checked the size difference and put this value in manually:
    ## output dim was 240, input dim was 224. 240 - 224 = 16, so 16 / 2 = 8.
    score = Cropping2D(cropping=((8, 8), (8, 8)), data_format=IMAGE_ORDERING)(o)
    o = Activation('sigmoid')(score)
    model = Model(img_input, o)
    model.model_name = "fcn_16"
    return model
This error is quite general and basically indicates that "something" went wrong. As the variety of answers suggests, it can arise from incompatibilities between the implementation and the underlying versions of Keras/TensorFlow, from incorrect filter sizes, and so on.
There is no single solution to this. For me, it was also an input shape issue: using grayscale instead of RGB worked, since the network expected 1 channel.

module 'tensorlayer.layers' has no attribute 'flatten'

I am trying to build a binarised neural network, but I get this error:
module 'tensorlayer.layers' has no attribute 'flatten'
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 1, 48, 1])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.BinaryConv2d(net, 32, (5, 5), (1, 1), padding='SAME', name='bcnn1')
net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool1')
net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=True,name='bn1')
net = tl.layers.SignLayer(net)
net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), padding='SAME', name='bcnn2')
net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), padding='SAME', name='pool2')
net = tl.layers.BatchNormLayer(net, act=tl.act.htanh, is_train=True, name='bn2')
net = tl.layers.flatten(net)
net = tl.layers.DenseLayer(net, n_units=40, act = tf.identity, name='output_layer')
There's no flatten attribute; you're looking for the FlattenLayer attribute:
net = tl.layers.FlattenLayer(net)

What is the best way to use the architecture of defined models from tf.keras.applications for non-image dataset?

I'm trying to use models from tf.keras.applications, such as VGG16, on my non-image data for a sequential classification task.
My X_train input shape = (# samples, window size, # columns)
Number of classes = 2
What would be the best way to copy the architecture of the model and modify details such as the input shapes of the input/hidden/output layers?
Thanks!
If you are looking for a quick way to find and modify the code that defines the architecture of VGG16, the easiest option is to look at the Keras source code:
# Block 1
x = layers.Conv2D(64, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block1_conv1')(img_input)
x = layers.Conv2D(64, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(128, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block2_conv1')(x)
x = layers.Conv2D(128, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv1')(x)
x = layers.Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv2')(x)
x = layers.Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv1')(x)
x = layers.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv2')(x)
x = layers.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block5_conv1')(x)
x = layers.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block5_conv2')(x)
x = layers.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block5_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

if include_top:
    # Classification block
    x = layers.Flatten(name='flatten')(x)
    x = layers.Dense(4096, activation='relu', name='fc1')(x)
    x = layers.Dense(4096, activation='relu', name='fc2')(x)
    x = layers.Dense(classes, activation='softmax', name='predictions')(x)
else:
    if pooling == 'avg':
        x = layers.GlobalAveragePooling2D()(x)
    elif pooling == 'max':
        x = layers.GlobalMaxPooling2D()(x)

# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
    inputs = keras_utils.get_source_inputs(input_tensor)
else:
    inputs = img_input

# Create model.
model = models.Model(inputs, x, name='vgg16')
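To adapt this pattern to the (# samples, window size, # columns) input from the question, here is a hedged sketch (ours, not from the answer; the layer sizes are illustrative assumptions) that swaps the 2D layers for their 1D counterparts:

import tensorflow as tf
from tensorflow.keras import layers, models

def vgg_style_1d(window_size, n_columns, n_classes=2):
    inputs = layers.Input(shape=(window_size, n_columns))
    x = inputs
    # mirror the VGG pattern: conv-conv-pool blocks with doubling filter counts
    for filters in (64, 128, 256):
        x = layers.Conv1D(filters, 3, activation='relu', padding='same')(x)
        x = layers.Conv1D(filters, 3, activation='relu', padding='same')(x)
        x = layers.MaxPooling1D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    outputs = layers.Dense(n_classes, activation='softmax')(x)
    return models.Model(inputs, outputs, name='vgg_style_1d')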