Keras: Error with Training. Dimension is not what is expected - tensorflow

I am working on a project where I am trying to fine-tune the last layer of a VGG-FACE model.
But every time I reach the fitting stage I get the same error:
Traceback (most recent call last):
File "freeze_2.py", line 258, in <module>
model=entrenamos_modelo('vggface_weights_tensorflow.h5')
File "freeze_2.py", line 168, in entrenamos_modelo
model2.fit(train_data,label, nb_epoch=nb_epoch, batch_size=64)
File "/imatge/psereno/workspace/venv-tfg/local/lib/python2.7/site-packages/keras/engine/training.py", line 1057, in fit
batch_size=batch_size)
File "/imatge/psereno/workspace/venv-tfg/local/lib/python2.7/site-packages/keras/engine/training.py", line 984, in _standardize_user_data
exception_prefix='model input')
File "/imatge/psereno/workspace/venv-tfg/local/lib/python2.7/site-packages/keras/engine/training.py", line 111, in standardize_input_data
str(array.shape))
Exception: Error when checking model input: expected input_2 to have shape (None, 3, 224, 224) but got array with shape (1576, 4096, 1, 1)
Here is the code I am using:
from keras.models import Model
from keras.layers import Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Flatten, Dropout, Activation
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.optimizers import SGD
from keras.layers import merge
from keras.models import Merge
import cv2, numpy as np
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
'''NOTE: FOR THIS TO WORK PROPERLY:
nano ~/.keras/keras.json
{
"image_dim_ordering": "th",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
'''
def bottleneck():
datagen = ImageDataGenerator(rescale=1.)
generator = datagen.flow_from_directory(train_data_dir,
target_size=(img_width, img_height),
batch_size=32,
class_mode=None,
shuffle=False)
pad1_1 = ZeroPadding2D(padding=(1, 1), name='in_train')(img)
conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)
pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)
conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)
pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)
pad2_1 = ZeroPadding2D((1, 1), trainable=False)(pool1)
conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)
pad2_2 = ZeroPadding2D((1, 1), trainable=False)(conv2_1)
conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)
pool2 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv2_2)
pad3_1 = ZeroPadding2D((1, 1))(pool2)
conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)
pad3_2 = ZeroPadding2D((1, 1))(conv3_1)
conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)
pad3_3 = ZeroPadding2D((1, 1))(conv3_2)
conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)
pool3 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv3_3)
pad4_1 = ZeroPadding2D((1, 1))(pool3)
conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)
pad4_2 = ZeroPadding2D((1, 1))(conv4_1)
conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)
pad4_3 = ZeroPadding2D((1, 1))(conv4_2)
conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)
pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)
pad5_1 = ZeroPadding2D((1, 1))(pool4)
conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)
pad5_2 = ZeroPadding2D((1, 1))(conv5_1)
conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)
pad5_3 = ZeroPadding2D((1, 1))(conv5_2)
conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)
pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)
fc6 = Convolution2D(4096, 7, 7, activation='relu', name='fc6')(pool5)
fc6_drop = Dropout(0.5)(fc6)
model = Model(input=img, output=fc6_drop)
bottleneck_features_train = model.predict_generator(generator, nb_train_samples)
np.save(open('features.npy', 'w'), bottleneck_features_train)
def entrenamos_modelo(weights_path=None):
train_data = np.load(open('features.npy'))
print(train_data.shape)
train_labels = np.array(
    [0] * (nb_train_samples / 8) + [1] * (nb_train_samples / 8) +
    [2] * (nb_train_samples / 8) + [3] * (nb_train_samples / 8) +
    [4] * (nb_train_samples / 8) + [5] * (nb_train_samples / 8) +
    [6] * (nb_train_samples / 8) + [7] * (nb_train_samples / 8))
lbl1 = np.array([[1, 0, 0, 0, 0, 0, 0, 0], ] * 197)
lbl2 = np.array([[0, 1, 0, 0, 0, 0, 0, 0], ] * 197)
lbl3 = np.array([[0, 0, 1, 0, 0, 0, 0, 0], ] * 197)
lbl4 = np.array([[0, 0, 0, 1, 0, 0, 0, 0], ] * 197)
lbl5 = np.array([[0, 0, 0, 0, 1, 0, 0, 0], ] * 197)
lbl6 = np.array([[0, 0, 0, 0, 0, 1, 0, 0], ] * 197)
lbl7 = np.array([[0, 0, 0, 0, 0, 0, 1, 0], ] * 197)
lbl8 = np.array([[0, 0, 0, 0, 0, 0, 0, 1], ] * 197)
label = np.concatenate([lbl1, lbl2, lbl3, lbl4, lbl5, lbl6, lbl7, lbl8])
'''train_labels --> loss='sparse_categorical_crossentropy'
labels --> loss='categorical_crossentropy'
'''
#MODEL VGG (the old model)
pad1_1 = ZeroPadding2D(padding=(1, 1), trainable=False, input_shape=(4096, 1, 1), name='in_train')(img)
conv1_1 = Convolution2D(64, 3, 3, activation='relu', name='conv1_1', trainable=False)(pad1_1)
pad1_2 = ZeroPadding2D(padding=(1, 1), trainable=False)(conv1_1)
conv1_2 = Convolution2D(64, 3, 3, activation='relu', name='conv1_2', trainable=False)(pad1_2)
pool1 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv1_2)
pad2_1 = ZeroPadding2D((1, 1), trainable=False)(pool1)
conv2_1 = Convolution2D(128, 3, 3, activation='relu', name='conv2_1', trainable=False)(pad2_1)
pad2_2 = ZeroPadding2D((1, 1), trainable=False)(conv2_1)
conv2_2 = Convolution2D(128, 3, 3, activation='relu', name='conv2_2', trainable=False)(pad2_2)
pool2 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv2_2)
pad3_1 = ZeroPadding2D((1, 1), trainable=False)(pool2)
conv3_1 = Convolution2D(256, 3, 3, activation='relu', name='conv3_1', trainable=False)(pad3_1)
pad3_2 = ZeroPadding2D((1, 1), trainable=False)(conv3_1)
conv3_2 = Convolution2D(256, 3, 3, activation='relu', name='conv3_2', trainable=False)(pad3_2)
pad3_3 = ZeroPadding2D((1, 1), trainable=False)(conv3_2)
conv3_3 = Convolution2D(256, 3, 3, activation='relu', name='conv3_3', trainable=False)(pad3_3)
pool3 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv3_3)
pad4_1 = ZeroPadding2D((1, 1), trainable=False)(pool3)
conv4_1 = Convolution2D(512, 3, 3, activation='relu', name='conv4_1', trainable=False)(pad4_1)
pad4_2 = ZeroPadding2D((1, 1), trainable=False)(conv4_1)
conv4_2 = Convolution2D(512, 3, 3, activation='relu', name='conv4_2', trainable=False)(pad4_2)
pad4_3 = ZeroPadding2D((1, 1), trainable=False)(conv4_2)
conv4_3 = Convolution2D(512, 3, 3, activation='relu', name='conv4_3', trainable=False)(pad4_3)
pool4 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv4_3)
pad5_1 = ZeroPadding2D((1, 1), trainable=False)(pool4)
conv5_1 = Convolution2D(512, 3, 3, activation='relu', name='conv5_1', trainable=False)(pad5_1)
pad5_2 = ZeroPadding2D((1, 1), trainable=False)(conv5_1)
conv5_2 = Convolution2D(512, 3, 3, activation='relu', name='conv5_2', trainable=False)(pad5_2)
pad5_3 = ZeroPadding2D((1, 1), trainable=False)(conv5_2)
conv5_3 = Convolution2D(512, 3, 3, activation='relu', name='conv5_3', trainable=False)(pad5_3)
pool5 = MaxPooling2D((2, 2), strides=(2, 2), trainable=False)(conv5_3)
fc6 = Convolution2D(4096, 7, 7, activation='relu', name='fc6', trainable=False)(pool5)
fc6_drop = Dropout(0.5)(fc6)
#We TRAIN this layer
fc7 = Convolution2D(4096, 1, 1, activation='relu', name='fc7', trainable=False)(fc6_drop)
fc7_drop = Dropout(0.5)(fc7)
fc8 = Convolution2D(2622, 1, 1, name='fc8', trainable=False)(fc7_drop)
flat = Flatten()(fc8)
out = Activation('softmax')(flat)
model = Model(input=img, output=out)
#We load the weights of the old model so that when we construct ours we don't have to retrain all of it.
if weights_path:
model.load_weights(weights_path)
# We construct our new model: the first 14 layers of the old model plus two new ones. The new FC layer and the softmax layer have to be trained.
fc7_n = Convolution2D(4096, 1, 1, activation='relu', name='fc7_n', trainable=True, input_shape=train_data.shape[1:])(fc6_drop)
fc7_drop_n = Dropout(0.5)(fc7_n)
fc8_n = Convolution2D(8, 1, 1, name='fc8_n', trainable=False)(fc7_drop_n)
flat_n = Flatten(name='flat_n')(fc8_n)
out_n = Activation('softmax')(flat_n)
model2 = Model(input=img, output=out_n)
#model2.summary()
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model2.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
'''train_labels --> loss='sparse_categorical_crossentropy'
labels --> loss='categorical_crossentropy'
'''
model2.fit(train_data,label, nb_epoch=nb_epoch, batch_size=64)
print('Model Trained')
#We save the weights so we can load them in our model
model2.save_weights(pesos_entrenados) # always save your weights after training or during training
#We have two options: 1) Return the model here or in the vgg_trained_model Function
return model2
if __name__ == "__main__":
im = Image.open('A.J._Buckley.jpg')
im = im.resize((224, 224))
im = np.array(im).astype(np.float32)
im = im.transpose((2, 0, 1))
im = np.expand_dims(im, axis=0)
# For the training stage
img_width, img_height = 224, 224
img = Input(shape=(3, img_height, img_width))
train_data_dir = 'merge/train'
pesos_entrenados='Modelo_Reentrenado.h5'
# validation_data_dir = 'data/validation'
nb_train_samples = 1576 # 197 per class and we have 8 classes (8 emotions)
nb_validation_samples = 0
nb_epoch = 20
# Stages to construct the model
bottleneck() #Reduce the computational cost
model=entrenamos_modelo('vggface_weights_tensorflow.h5') #Construction of the model
#model.summary()
out = model.predict(im)
print(out[0][0])
The 'vggface_weights_tensorflow.h5' file is a conversion of the model weights from Theano to TensorFlow. I used the following script to convert the weights:
model = Model(input=img, output=out)
weights_path = 'vgg-face-keras.h5'
model.load_weights(weights_path)
ops = []
for layer in model.layers:
if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D', 'Convolution3D', 'AtrousConvolution2D']:
original_w = K.get_value(layer.W)
converted_w = convert_kernel(original_w)
ops.append(tf.assign(layer.W, converted_w).op)
K.get_session().run(ops)
model.save_weights('vggface_weights_tensorflow.h5')
The original scripts are from here:
https://gist.github.com/EncodeTS/6bbe8cb8bebad7a672f0d872561782d9
If anyone knows how to solve this problem, I would be very grateful.

Have you resized all your images so they are 224 by 224?
The error says the model expects a batch of images with 3 channels (RGB), 224 pixels wide and 224 pixels high, but it received an array of shape (1576, 4096, 1, 1). It looks like a problem with the input data rather than with Keras or the model code itself. Make sure your directory and data are set up the way the model expects, and that the array you pass to fit matches the model's input shape.
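If the intent is to train only the new top layers on the saved bottleneck features, another way to make the shapes agree is to give the top model an input matching train_data.shape[1:] rather than the 224x224 image input, and fit that small model on the features. A rough sketch under that assumption, using the same Keras 1.x API as the question (the layer names mirror the code above; label and nb_epoch are the variables already defined in the question):

import numpy as np
from keras.models import Model
from keras.layers import Input, Convolution2D, Dropout, Flatten, Activation
from keras.optimizers import SGD

train_data = np.load(open('features.npy'))      # bottleneck features, shape (1576, 4096, 1, 1)

feat_input = Input(shape=train_data.shape[1:])  # (4096, 1, 1) instead of (3, 224, 224)
fc7_n = Convolution2D(4096, 1, 1, activation='relu', name='fc7_n')(feat_input)
fc7_drop_n = Dropout(0.5)(fc7_n)
fc8_n = Convolution2D(8, 1, 1, name='fc8_n')(fc7_drop_n)
flat_n = Flatten(name='flat_n')(fc8_n)
out_n = Activation('softmax')(flat_n)

top_model = Model(input=feat_input, output=out_n)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
top_model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
top_model.fit(train_data, label, nb_epoch=nb_epoch, batch_size=64)  # label/nb_epoch as in the question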

Related

Simultaneously training two CNN models

I wish to train two CNN models in the same training loop where the input of the second model is the feature map generated by the forward pass on the first model.
def custom_training(inputd, x_train, y_train, x_val, y_val, n_epochs):
    optimizer = tf.keras.optimizers.Adam()
    ce_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
    loss_train = np.zeros(shape=(n_epochs,), dtype=np.float32)
    acc_train = np.zeros(shape=(n_epochs,), dtype=np.float32)
    loss_val = np.zeros(shape=(n_epochs,))
    acc_val = np.zeros(shape=(n_epochs,))
    unet_model = tf.keras.Model(inputd, build_unet(inputd, 16))
    #ds_model = DS_module()
    n_batches = len(generator_train)
    print(n_batches)
    for epoch in range(n_epochs):
        epoch_loss_avg = tf.keras.metrics.Mean()  # Keeping track of the training loss
        epoch_acc_avg = tf.keras.metrics.Mean()   # Keeping track of the training accuracy
        for batch in range(n_batches):
            x, y = generator_train[batch]
            with tf.GradientTape() as tape:  # Forward pass
                y_ = unet_model(x, training=True)
                loss = ce_loss(y_true=y, y_pred=y_)
                k = compute_K(y_)
                print(k)
                ds_model = tf.keras.Model(y_, DS_module(y_, k, num_class))
            grad = tape.gradient(loss, unet_model.trainable_variables)  # Backpropagation
            optimizer.apply_gradients(zip(grad, unet_model.trainable_variables))  # Update network weights
            epoch_loss_avg(loss)
            epoch_acc_avg(accuracy_score(y_true=y, y_pred=np.argmax(y_, axis=-1)))
        #generator.on_epoch_end()
        loss_train[epoch] = epoch_loss_avg.result()
        acc_train[epoch] = epoch_acc_avg.result()
        y_ = unet_model.predict(x_val)  # Validation predictions
        loss_val[epoch] = ce_loss(y_true=y_val, y_pred=y_).numpy()
        acc_val[epoch] = accuracy_score(y_true=y_val, y_pred=np.argmax(y_, axis=-1))
Below are the contents of the two models: a U-Net, and another convolutional model that takes as input the feature map generated by the U-Net:
def build_unet(input_layer, start_neurons):
# 128 -> 64
conv1 = Conv2D(start_neurons * 1, (3, 3), activation="relu", padding="same")(input_layer)
conv1 = Conv2D(start_neurons * 1, (3, 3), activation="relu", padding="same")(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)
pool1 = Dropout(0.25)(pool1)
# 64 -> 32
conv2 = Conv2D(start_neurons * 2, (3, 3), activation="relu", padding="same")(pool1)
conv2 = Conv2D(start_neurons * 2, (3, 3), activation="relu", padding="same")(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)
pool2 = Dropout(0.5)(pool2)
# 32 -> 16
conv3 = Conv2D(start_neurons * 4, (3, 3), activation="relu", padding="same")(pool2)
conv3 = Conv2D(start_neurons * 4, (3, 3), activation="relu", padding="same")(conv3)
pool3 = MaxPooling2D((2, 2))(conv3)
pool3 = Dropout(0.5)(pool3)
# 16 -> 8
conv4 = Conv2D(start_neurons * 8, (3, 3), activation="relu", padding="same")(pool3)
conv4 = Conv2D(start_neurons * 8, (3, 3), activation="relu", padding="same")(conv4)
pool4 = MaxPooling2D((2, 2))(conv4)
pool4 = Dropout(0.5)(pool4)
# Middle
convm = Conv2D(start_neurons * 16, (3, 3), activation="relu", padding="same")(pool4)
convm = Conv2D(start_neurons * 16, (3, 3), activation="relu", padding="same")(convm)
# 8 -> 16
deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
uconv4 = concatenate([deconv4, conv4])
uconv4 = Dropout(0.5)(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation="relu", padding="same")(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation="relu", padding="same")(uconv4)
# 16 -> 32
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Dropout(0.5)(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation="relu", padding="same")(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation="relu", padding="same")(uconv3)
# 32 -> 64
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Dropout(0.5)(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation="relu", padding="same")(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation="relu", padding="same")(uconv2)
# 64 -> 128
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Dropout(0.5)(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation="relu", padding="same")(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation="relu", padding="same")(uconv1)
feature_maps = Conv2D(start_neurons, (1, 1), activation='relu')(uconv1)
return feature_maps
#tf.function
def DS_module(feature_map, prototypes, num_class):
    ED = DS1(prototypes, 16)(feature_map)
    ED_ac = DS1_activate(prototypes)(ED)
    mass_prototypes = DS2(prototypes, num_class)(ED_ac)
    mass_prototypes_omega = DS2_omega(prototypes, num_class)(mass_prototypes)
    mass_Dempster = DS3_Dempster(prototypes, num_class)(mass_prototypes_omega)
    pignistic = DM_pignistic(num_class)(mass_Dempster)
    pignistic = DS3_normalize()(pignistic)
    outputs = pignistic
    return outputs
When I execute this code, I obtain the following error:
Error message obtained
This is the content of the compute_K function; it runs PCA, KMeans, and silhouette scoring on the feature map.
def compute_K(featureMap):
X = tf.reshape(featureMap, [featureMap.shape[0]*featureMap.shape[1]*featureMap.shape[2], featureMap.shape[3]])
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(X)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
X = principalDf.to_numpy()
print(len(X))
#range_n_clusters = [3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 16, 17, 18, 19, 20]
range_n_clusters = [3, 4, 5, 6, 7]
silhouette_avg_n_clusters = []
max_sillhouette = 0
max_k = 0
plt.figure(figsize=(15, 7))
plt.subplot(111)
for n_clusters in range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=42)
cluster_labels = clusterer.fit_predict(X)
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
silhouette_avg_n_clusters.append(silhouette_avg)
if (n_clusters == 2):
max_sillhouette = silhouette_avg
max_k = n_clusters
if (max_sillhouette< silhouette_avg):
max_sillhouette = silhouette_avg
max_k = n_clusters
return max_k
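For what it's worth, a common pattern for training two chained models is to build both once before the loop and call them in sequence inside a single GradientTape, so gradients can flow through both sets of weights. A rough sketch under that assumption (it builds the DS model once with a fixed number of prototypes instead of recomputing K every batch, which is a simplification of the code above; build_unet, DS_module, generator_train and num_class are the names from the question, the shapes and constants are assumed):

import tensorflow as tf

num_class = 3    # assumed number of classes
n_epochs = 10    # assumed
prototypes = 5   # assumed fixed prototype count (the question recomputes K per batch)

inputd = tf.keras.Input(shape=(128, 128, 3))                 # assumed input size
unet_model = tf.keras.Model(inputd, build_unet(inputd, 16))

# Build the second model once, on a symbolic input with the U-Net's output shape.
feat_input = tf.keras.Input(shape=unet_model.output_shape[1:])
ds_model = tf.keras.Model(feat_input, DS_module(feat_input, prototypes, num_class))

optimizer = tf.keras.optimizers.Adam()
ce_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)

for epoch in range(n_epochs):
    for batch in range(len(generator_train)):
        x, y = generator_train[batch]
        with tf.GradientTape() as tape:
            feats = unet_model(x, training=True)   # forward pass through model 1
            y_ = ds_model(feats, training=True)    # model 2 consumes model 1's feature map
            loss = ce_loss(y_true=y, y_pred=y_)
        variables = unet_model.trainable_variables + ds_model.trainable_variables
        grads = tape.gradient(loss, variables)     # one tape covers both models
        optimizer.apply_gradients(zip(grads, variables))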

Distribute Tensor over Multiple GPUs

I am attempting to train a model in which the input exceeds the memory limits for a single GPU on the system (16 GB P100). The size of the input is (1,256,256,64,2). However, I have access to 4 identical GPUs on the system. I know I can distribute processes with tf.distribute but I am unsure how to do this with a batch size of 1. Is it possible to distribute a single sample over multiple GPUs so I don't receive OOM errors?
Edit:
Here is the code used to build the model.
def dice_loss(y_true, y_pred):
numerator = 2 * tf.reduce_sum(y_true * y_pred, axis=(1,2,3))
denominator = tf.reduce_sum(y_true + y_pred, axis=(1,2,3))
return tf.reshape(1 - numerator / denominator, (-1, 1, 1))
class ResidualUnitEncode(keras.layers.Layer):
def __init__(self, filters=1, strides=1, activation="relu", **kwargs):
super().__init__(**kwargs)
self.activation = keras.activations.get(activation)
self.main_layers = [
keras.layers.Conv3D(filters, (3, 3, 3), strides=strides,
padding="same", use_bias=False),
keras.layers.BatchNormalization(),
self.activation,
keras.layers.Conv3D(filters, (3, 3, 3), strides=1,
padding="same", use_bias=False),
keras.layers.BatchNormalization()]
self.skip_layers = []
if strides > 1:
self.skip_layers = [
keras.layers.Conv3D(filters, (1, 1, 1), strides=strides,
padding="same", use_bias=False),
keras.layers.BatchNormalization()]
def call(self, inputs):
Z = inputs
for layer in self.main_layers:
Z = layer(Z)
skip_Z = inputs
for layer in self.skip_layers:
skip_Z = layer(skip_Z)
return self.activation(Z + skip_Z)
def get_config(self):
base_config = super(ResidualUnitEncode, self).get_config()
return base_config
class ResidualUnitDecode(keras.layers.Layer):
def __init__(self, filters=1, strides=1, activation="relu", **kwargs):
super().__init__(**kwargs)
self.activation = keras.activations.get(activation)
self.main_layers = [
keras.layers.Conv3DTranspose(filters, (3, 3, 3), strides=1,
padding="same", use_bias=False),
keras.layers.BatchNormalization(),
self.activation,
keras.layers.Conv3DTranspose(filters, (3, 3, 3), strides=strides,
padding="same", use_bias=False),
keras.layers.BatchNormalization()]
self.skip_layers = []
if strides > 1:
self.skip_layers = [
keras.layers.Conv3DTranspose(filters, (3, 3, 3), strides=strides,
padding="same", use_bias=False),
keras.layers.BatchNormalization()]
def call(self, inputs):
Z = inputs
for layer in self.main_layers:
Z = layer(Z)
skip_Z = inputs
for layer in self.skip_layers:
skip_Z = layer(skip_Z)
return self.activation(Z + skip_Z)
def get_config(self):
base_config = super(ResidualUnitDecode, self).get_config()
return base_config
def build_unet(image_shape, batch_size):
inputs = keras.layers.Input(shape=image_shape, batch_size=batch_size)
conv1 = keras.layers.Conv3D(64, (7, 7, 7), strides=(2, 2, 1), padding="same", use_bias=False, input_shape=image_shape)(inputs)
conv1 = keras.layers.BatchNormalization()(conv1)
conv1 = keras.layers.Activation("relu")(conv1)
pool1 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv1)
conv2 = ResidualUnitEncode(filters=128, strides=2)(pool1)
pool2 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv2)
conv3 = ResidualUnitEncode(filters=256, strides=2)(pool2)
pool3 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv3)
conv4 = ResidualUnitEncode(filters=512, strides=2)(pool3)
pool4 = keras.layers.MaxPool3D(pool_size=(3, 3, 3), strides=1, padding="same")(conv4)
conv5 = ResidualUnitEncode(filters=1024, strides=2)(pool4)
drop5 = keras.layers.Dropout(0.5)(conv5)
up6 = ResidualUnitDecode(filters=512, strides=2)(drop5)
merge6 = keras.layers.concatenate([conv4, up6], axis=4)
conv6 = ResidualUnitEncode(filters=512, strides=2)(merge6)
conv6 = keras.layers.UpSampling3D(size=(2,2,2))(conv6)
up7 = ResidualUnitDecode(filters=256, strides=2)(conv6)
merge7 = keras.layers.concatenate([conv3, up7], axis=4)
conv7 = ResidualUnitEncode(filters=256, strides=2)(merge7)
conv7 = keras.layers.UpSampling3D(size=(2, 2, 2))(conv7)
up8 = ResidualUnitDecode(filters=128, strides=2)(conv7)
merge8 = keras.layers.concatenate([conv2, up8], axis=4)
conv8 = ResidualUnitEncode(filters=128, strides=2)(merge8)
conv8 = keras.layers.UpSampling3D(size=(2, 2, 2))(conv8)
up9 = ResidualUnitDecode(filters=64, strides=2)(conv8)
merge9 = keras.layers.concatenate([conv1, up9], axis=4)
conv9 = ResidualUnitDecode(filters=64, strides=2)(merge9)
conv10 = keras.layers.Conv3D(1,1, strides=(1,1,2),activation="sigmoid")(conv9)
model = keras.Model(inputs, conv10)
model.compile(optimizer=keras.optimizers.Adam(lr=0.001), loss=dice_loss)
model.summary()
return model
Here is the code to run the training using Kfold CV:
image_shape = [256,256,64,2]
dataset = tf.data.TFRecordDataset('train.tfrecord').map(parse_record).batch(69)
nx = tf.compat.v1.data.make_one_shot_iterator(dataset)
x, y = nx.get_next()
x_test = x[55:69, ...]
y_test = y[55:69, ...]
x_train = x[0:54, ...]
y_train = y[0:54, ...]
kfold = KFold(n_splits=10, shuffle=True)
fold_no = 1
acc_per_fold = []
loss_per_fold = []
for train, test in kfold.split(x_train, y_train):
model = build_unet(image_shape=image_shape, batch_size=1)
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss')
model_file_name = './Fold_' + str(fold_no) + '_best_model.h5'
model_checkpoint = keras.callbacks.ModelCheckpoint(model_file_name, monitor='val_loss')
log_dir_name = './Fold_' + str(fold_no) + '_log_dir'
tb = keras.callbacks.TensorBoard(log_dir_name)
print('------------------------------------------------------------------------')
print(f'Training for fold {fold_no} ...')
train_id_rows = tf.constant(train.reshape(-1,1))
test_id_rows = tf.constant(test.reshape(-1,1))
x_train_train = tf.gather_nd(x_train, train_id_rows)
y_train_train = tf.gather_nd(y_train, train_id_rows)
x_train_test = tf.gather_nd(x_train, test_id_rows)
y_train_test = tf.gather_nd(y_train, test_id_rows)
history = model.fit(x_train_train, y_train_train, epochs=N_EPOCHS, callbacks=[tb, model_checkpoint, early_stopping], batch_size=1)
scores = model.evaluate(x_train_test, y_train_test, verbose=0)
acc_per_fold.append(scores[1] * 100)
loss_per_fold.append(scores[0])
fold_no = fold_no + 1
There are 69 samples in total in the dataset; 54 are used for the training/validation loop.
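As a point of reference, tf.distribute.MirroredStrategy does data parallelism: each GPU gets a slice of the global batch, so it cannot split one sample across GPUs, and with a global batch size of 1 it will not reduce per-sample memory. Splitting a single sample would require model parallelism (placing different layers on different devices), which Keras fit does not do automatically. A minimal sketch of the data-parallel setup, assuming the build_unet function and training variables from the question:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
print("Replicas:", strategy.num_replicas_in_sync)

with strategy.scope():
    # Variables created under this scope are mirrored across all GPUs.
    model = build_unet(image_shape=[256, 256, 64, 2], batch_size=None)

# The global batch is split across replicas, so use at least one sample per GPU.
history = model.fit(x_train_train, y_train_train,
                    epochs=N_EPOCHS,
                    batch_size=strategy.num_replicas_in_sync)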

Graph disconnected: cannot obtain value for tensor Tensor("input_1:0", shape=(None, 299, 299, 3)) at layer "input_1"

Graph disconnected: cannot obtain value for tensor Tensor("input_1:0", shape=(None, 299, 299, 3), dtype=float32) at layer "input_1". The following previous layers were accessed without issue: []
How can I eliminate this error? I am trying to build an InceptionV3 network and call it, but the model does not compile. I believe the input layer is not receiving any inputs, but I don't understand why.
from __future__ import print_function
from __future__ import absolute_import
import warnings
import numpy as np
import keras
from keras.models import Model
from keras import layers
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions
#from keras.applications.imagenet_utils import _obtain_input_shape
from keras.preprocessing import image
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
def conv2d_bn(x,filters,num_row,num_col,padding='same',strides=(1, 1),name=None):
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
if K.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
x = Conv2D(filters, (num_row, num_col),strides=strides,padding=padding,use_bias=False,name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = Activation('relu', name=name)(x)
return x
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = (299,299,3)
if input_tensor is None:
img_input = keras.layers.Input(shape=input_shape)
else:
img_input = keras.layers.Input(tensor=input_tensor, shape=input_shape)
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0: 35 x 35 x 256, type 1 module
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 288, type 1 module
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 288, type 1 module
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768,type 2module
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
# mixed 4: 17 x 17 x 768,type3 module
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768,type3 modules
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768, type 3 module
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280, type 4 module
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
# mixed 9: 8 x 8 x 2048, type 5 modules
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
img_input = keras.layers.Input(shape=input_shape)
inputs = img_input
# Create model.
model = Model(inputs,x, name='inception_v3')
'''
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
'''
return model
model = InceptionV3(include_top=False, weights='imagenet',input_shape=(299,299,3))
print(model.summary())
Try commenting out these lines:
# if input_tensor is not None:
# inputs = get_source_inputs(input_tensor)
# else:
# img_input = Input(shape=input_shape)
# inputs = img_input
and create your model in this way:
model = Model(img_input, x, name='inception_v3')
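The reason this works: when input_tensor is None, the original code creates a second keras.layers.Input near the bottom and passes that to Model, but none of the layers were ever applied to that new placeholder, so the output x cannot be traced back to it. A minimal illustration with toy layers (not from the question):

from keras.models import Model
from keras.layers import Input, Dense

a = Input(shape=(4,))
x = Dense(2)(a)        # x is built on top of `a`

b = Input(shape=(4,))  # a second, unrelated placeholder
# Model(b, x)          # -> "Graph disconnected": x does not depend on b
model = Model(a, x)    # works: the output traces back to its real input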

Getting dynamic shape of tensors

I want to create a custom metric in Keras with the TensorFlow backend.
Say we have a minimal metric where we want to get the shape of a dynamically shaped tensor. I'm trying to do this as below:
def metric(y_true, y_pred):
    y_num = (tf.shape(y_true))[0]
    K.get_session().run(y_num)
    return anything
But I always get the error message:
InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_47_target' with dtype float and shape [?,?,?,?]
[[Node: conv2d_47_target = Placeholder[dtype=DT_FLOAT, shape=[?,?,?,?], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
[[Node: metrics/metric/strided_slice/_1079 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_11_metrics/metric/strided_slice", tensor_type=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
I can't figure out where the problem could be.
And if there is a problem in my code, how can I still get the shape some other way?
Any help would be highly appreciated.
UPDATE:
Model code:
ACTIVATION = "relu"
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
x = Conv2D(filters, size, strides=strides, padding=padding)(x)
x = BatchNormalization()(x)
if activation == True:
x = Activation(ACTIVATION)(x)
return x
def residual_block(blockInput, num_filters=16):
x = Activation(ACTIVATION)(blockInput)
x = BatchNormalization()(x)
x = convolution_block(x, num_filters, (3,3) )
x = convolution_block(x, num_filters, (3,3), activation=False)
x = Add()([x, blockInput])
return x
# Build model
def build_model(input_layer, start_neurons, DropoutRatio = 0.5):
# 101 -> 50
conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(input_layer)
conv1 = residual_block(conv1,start_neurons * 1)
conv1 = residual_block(conv1,start_neurons * 1)
conv1 = Activation(ACTIVATION)(conv1)
pool1 = MaxPooling2D((2, 2))(conv1)
pool1 = Dropout(DropoutRatio/2)(pool1)
# 50 -> 25
conv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(pool1)
conv2 = residual_block(conv2,start_neurons * 2)
conv2 = residual_block(conv2,start_neurons * 2)
conv2 = Activation(ACTIVATION)(conv2)
pool2 = MaxPooling2D((2, 2))(conv2)
pool2 = Dropout(DropoutRatio)(pool2)
# 25 -> 12
conv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(pool2)
conv3 = residual_block(conv3,start_neurons * 4)
conv3 = residual_block(conv3,start_neurons * 4)
conv3 = Activation(ACTIVATION)(conv3)
pool3 = MaxPooling2D((2, 2))(conv3)
pool3 = Dropout(DropoutRatio)(pool3)
# 12 -> 6
conv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(pool3)
conv4 = residual_block(conv4,start_neurons * 8)
conv4 = residual_block(conv4,start_neurons * 8)
conv4 = Activation(ACTIVATION)(conv4)
pool4 = MaxPooling2D((2, 2))(conv4)
pool4 = Dropout(DropoutRatio)(pool4)
# Middle
convm = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(pool4)
convm = residual_block(convm,start_neurons * 16)
convm = residual_block(convm,start_neurons * 16)
convm = Activation(ACTIVATION)(convm)
# 6 -> 12
deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(convm)
uconv4 = concatenate([deconv4, conv4])
uconv4 = Dropout(DropoutRatio)(uconv4)
uconv4 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv4)
uconv4 = residual_block(uconv4,start_neurons * 8)
uconv4 = residual_block(uconv4,start_neurons * 8)
uconv4 = Activation(ACTIVATION)(uconv4)
# 12 -> 25
#deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv4)
deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="valid")(uconv4)
uconv3 = concatenate([deconv3, conv3])
uconv3 = Dropout(DropoutRatio)(uconv3)
uconv3 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv3)
uconv3 = residual_block(uconv3,start_neurons * 4)
uconv3 = residual_block(uconv3,start_neurons * 4)
uconv3 = Activation(ACTIVATION)(uconv3)
# 25 -> 50
deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
uconv2 = concatenate([deconv2, conv2])
uconv2 = Dropout(DropoutRatio)(uconv2)
uconv2 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv2)
uconv2 = residual_block(uconv2,start_neurons * 2)
uconv2 = residual_block(uconv2,start_neurons * 2)
uconv2 = Activation(ACTIVATION)(uconv2)
# 50 -> 101
#deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="valid")(uconv2)
uconv1 = concatenate([deconv1, conv1])
uconv1 = Dropout(DropoutRatio)(uconv1)
uconv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv1)
uconv1 = residual_block(uconv1,start_neurons * 1)
uconv1 = residual_block(uconv1,start_neurons * 1)
uconv1 = Activation(ACTIVATION)(uconv1)
uconv1 = Dropout(DropoutRatio/2)(uconv1)
output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)
#output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv1)
return output_layer
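For reference, inside a custom metric the tensors are symbolic, so the batch size from tf.shape can be used directly in the computation instead of being evaluated with K.get_session().run, which is what asks for the unfed target placeholder. A minimal sketch, assuming the TF 1.x Keras backend used in the question:

import tensorflow as tf
from keras import backend as K

def metric(y_true, y_pred):
    # Symbolic batch size; Keras evaluates it with the rest of the graph per batch.
    y_num = K.cast(tf.shape(y_true)[0], 'float32')
    # Example use: sum of absolute errors, normalised by the batch size.
    return K.sum(K.abs(y_true - y_pred)) / y_num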

You must feed value for placeholder *_sample_weights while training UNET from VGG16

I am trying to create a U-Net using VGG16 as the first layers.
def BuildUNet2():
keras.backend.set_learning_phase(1)
inputs = keras.layers.Input(shape=(PATCH_SIZE, PATCH_SIZE, 3), name="inputs")
vggModel=keras.applications.VGG16(include_top=False, input_tensor=inputs)
layers = dict([(layer.name, layer) for layer in vggModel.layers])
print("Layers", len(layers), layers)
block1_conv2 = layers["block1_conv2"].output
block2_conv2 = layers["block2_conv2"].output
block3_conv3 = layers["block3_conv3"].output
block4_conv3 = layers["block4_conv3"].output
vggTop = layers["block5_conv3"].output
up6=keras.layers.concatenate([keras.layers.Conv2DTranspose(256, (2,2), strides=(2,2), padding="same")(vggTop), block4_conv3], axis=3)
conv61=keras.layers.Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up6)
conv62=keras.layers.Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv61)
up7 = keras.layers.concatenate([keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding="same")(conv62), block3_conv3], axis=3)
conv71=keras.layers.Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up7)
conv72=keras.layers.Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv71)
up8 = keras.layers.concatenate([keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same")(conv72), block2_conv2], axis=3)
conv81=keras.layers.Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up8)
conv82=keras.layers.Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv81)
up9 = keras.layers.concatenate([keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same")(conv82), block1_conv2], axis=3)
conv91=keras.layers.Conv2D(32, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up9)
conv92=keras.layers.Conv2D(32, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv91)
conv93=keras.layers.Conv2D(1, (1, 1), activation="sigmoid")(conv92)
model = keras.models.Model(input=[inputs], output=[conv93])
for layer in model.layers[:19]:
layer.trainable = False
model.compile(optimizer=keras.optimizers.Adam(lr=1e-5), loss=metric.dice_coef_loss,
metrics=[metric.dice_coef, "accuracy"])
model.summary()
return model
I am training with:
with h5py.File(parms.training, "r") as trainingsFile:
wrk=trainingsFile["work"].value
np.random.seed(42)
np.random.shuffle(wrk)
limit=int(wrk.shape[0]*0.8)
trainData=wrk[:limit]
valData=wrk[limit:]
trainGen=DataGenerator(trainData, parms.batchSize)
valGen=DataGenerator(valData, parms.batchSize)
bestCheckpoint = keras.callbacks.ModelCheckpoint("best.h5",
monitor="val_loss",
save_best_only=True,
save_weights_only=False)
regCheckpoint = keras.callbacks.ModelCheckpoint("checkpoint-{epoch:04d}.h5", period=10)
csvLog = keras.callbacks.CSVLogger("log.csv", append=True)
runName = datetime.datetime.now().isoformat("#")[:19].replace(":", "-")
tensorBoard = keras.callbacks.TensorBoard(log_dir="./logs/%s/" % runName)
lrPlateau = keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=10, cooldown=5)
model.fit_generator(trainGen,
epochs=parms.epochs,
steps_per_epoch=trainGen.__len__(),
validation_data=valGen,
validation_steps=valGen.__len__(),
callbacks=[bestCheckpoint, regCheckpoint, csvLog, tensorBoard, lrPlateau],
use_multiprocessing=False,
)
The DataGenerator is defined as:
class DataGenerator(keras.utils.Sequence):
    def __init__(self, data, batchSize):
        self.data = data
        self.batchSize = batchSize

    def __len__(self):
        return int((self.data.shape[0] + self.batchSize - 1) / (self.batchSize))

    def __getitem__(self, item):
        X = np.zeros((self.batchSize, self.data.shape[1], self.data.shape[2], 3), dtype=np.float32)
        Y = np.zeros((self.batchSize, self.data.shape[1], self.data.shape[2]), dtype=np.float32)
        j = 0
        wrk = np.zeros((self.data.shape[1], self.data.shape[2], self.data.shape[3]), dtype=np.float32)
        for i in range(item * self.batchSize, min((item + 1) * self.batchSize, self.data.shape[0])):
            wrk = self.data[i, :, :, :]
            if random.random() < 0.5:
                wrk = wrk[:, ::-1, :]
            if random.random() < 0.5:
                wrk = wrk[::-1, :, :]
            direction = int(random.random() * 4) * 90
            if direction:
                wrk = imutils.rotate(wrk, direction)
            X[j, :, :, :] = wrk[:, :, 0:3]
            Y[j, :, :] = wrk[:, :, 3]
            j += 1
        X = X.resize((j, X.shape[1], X.shape[2], X.shape[3]))
        Y = Y.resize((j, Y.shape[1], Y.shape[2]))
        return X, Y
Trying to train the model results in
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_9_sample_weights' with dtype float and shape [?]
Even explicitly returning a sample_weight (an additional np.ones((j), dtype=np.float32)) from the DataGenerator does not solve the problem.
What's wrong?
How do I correct it?
The problem was with DataGenerator.__getitem__():
ndarray.resize does not return a new numpy array; it changes the original array in place and returns nothing. Therefore the __getitem__ method returned None, None.
The Keras error message is misleading.
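A small demonstration of the behaviour and of one possible corrected return, with hypothetical shapes (slicing off the unused rows is one way to fix __getitem__; np.resize, which does return a new array, would also work):

import numpy as np

X = np.zeros((8, 128, 128, 3), dtype=np.float32)
Y = np.zeros((8, 128, 128), dtype=np.float32)
j = 5  # number of rows actually filled in this batch

# ndarray.resize modifies the array in place and returns None, so
# "X = X.resize(...)" leaves X equal to None. Return a slice instead:
X, Y = X[:j], Y[:j]
print(X.shape, Y.shape)  # (5, 128, 128, 3) (5, 128, 128)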