Merge two sequential models in Keras for a hybrid model - tensorflow

I want to combine two sequential models into a hybrid model (with Keras 2.6.0). The first model is a stack of dense layers that takes a set of 4 parameters, and the second is a stack of 2D convolutions over a (32, 32) image. The goal is to predict a curve of 128 points.
My current model:
def get_model_v2(params_shape, img_shape):
    params_model = models.Sequential()
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n1'))
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n2'))
    params_model.add(layers.Dense(256, name='Output'))

    img_model = models.Sequential()
    img_model.add(layers.Input(img_shape, name='InputLayer2'))
    img_model.add(layers.Conv2D(64, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Conv2D(16, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Flatten())

    concat = tf.keras.layers.concatenate([params_model, img_model])

    model = models.Sequential()
    model.add(layers.Input(concat, name='InputLayer3'))
    model.add(layers.Dense(256, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n1'))
    model.add(layers.Dense(128, name='Output'))

    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mae', 'mse'])
    return model

model = get_model_v2((4,), (32, 32, 1))
My problem is when I have to combine the two models: I don't know what to use. With this concatenate example I get an error like: TypeError: 'NoneType' object is not subscriptable. I understand the problem, but I can't find another solution...

A few issues here:
You are not using params_shape for your params_model (which therefore comes out with an undefined shape).
As you understood, you can't concatenate models with a concatenation layer; it operates on tensors.
The final model needs to go through the functional API.
You have a bunch of layers with the same name; you cannot have the same name for two layers in the same model.
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
import tensorflow.keras.regularizers as regularizers
import tensorflow as tf

def get_model_v2(params_shape, img_shape):
    params_model = models.Sequential()
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n1', input_shape=params_shape))
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n2'))
    params_model.add(layers.Dense(256, name='Output'))

    img_model = models.Sequential()
    img_model.add(layers.Input(img_shape, name='InputLayer2'))
    img_model.add(layers.Conv2D(64, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Conv2D(16, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Flatten())

    # concatenate the output *tensors* of the two models, not the models themselves
    param_out = params_model.outputs[0]
    img_out = img_model.outputs[0]
    concat_out = tf.keras.layers.concatenate([param_out, img_out])

    full_dense_out = layers.Dense(256, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n3')(concat_out)
    final_out = layers.Dense(128, name='Output_final')(full_dense_out)

    model = models.Model(inputs=[params_model.inputs, img_model.inputs], outputs=final_out)
    model.summary()

    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mae', 'mse'])
    return model

model = get_model_v2((4,), (32, 32, 1))
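Since the combined model has two inputs, the parameters and the images are passed as a list when training. A minimal usage sketch with random placeholder data (the array names and the sample count of 100 are illustrative, not from the question):

import numpy as np

params = np.random.rand(100, 4).astype("float32")        # 100 samples, 4 parameters each
imgs = np.random.rand(100, 32, 32, 1).astype("float32")  # 100 images of shape (32, 32, 1)
curves = np.random.rand(100, 128).astype("float32")      # targets: 128-point curves

model.fit([params, imgs], curves, batch_size=16, epochs=2)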

Related

Modern syntax for old-fashioned Keras command "Convolution2D"?

I was studying different CNN architectures to predict the CIFAR10 dataset, and I found this interesting GitHub repository:
https://gist.github.com/wielandbrendel/ccf1ff6f8f92139439be
I tried to run the model, but it was created 6 years ago and the following Keras command is no longer valid:
model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
How is this command translated into modern Keras syntax for Conv2D?
I get an error in Keras when I try to pass this sequence of integers to Convolution2D(32, 3, 3, 3, ...). I guess 32 is the number of channels, and then we specify a 3x3 kernel size, but I am not sure about the meaning of the last 3 (4th position).
PS. Changing border_mode into padding='valid' or 'same' returns the following error:
model.add(Convolution2D(32, 3, 3, 3, padding='valid'))
TypeError: __init__() got multiple values for argument 'padding'
The gist you're following is outdated and also has some issues. You don't need to follow it now. Here is an updated version of it; try this.
Imports and Dataset
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Dropout, Activation,
                                     Flatten, Conv2D, MaxPooling2D)
from tensorflow.keras.optimizers import SGD

# parameters
batch_size = 32
nb_classes = 10
nb_epoch = 5

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()

# convert class vectors to binary class matrices
Y_train = tf.keras.utils.to_categorical(y_train, nb_classes)
Y_test = tf.keras.utils.to_categorical(y_test, nb_classes)

# scale pixel values to [0, 1]
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255

X_train.shape, y_train.shape, X_test.shape, y_test.shape
# ((50000, 32, 32, 3), (50000, 1), (10000, 32, 32, 3), (10000, 1))
Modeling
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding="same"))
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding="same"))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding="same"))
model.add(Conv2D(filters=32, kernel_size=(3, 3),
                 strides=(1, 1), activation='relu', padding="same"))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
Train and Evaluate
model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch)
# test score & top 1 performance
score = model.evaluate(X_test, Y_test, batch_size=batch_size)
y_hat = model.predict(X_test)
yhat = np.argmax(y_hat, 1)
top1 = np.mean(yhat == np.squeeze(y_test))
print('Test score/Top1', score, top1)
Convolution2D is now named Conv2D, but there is still a Convolution2D alias, so the old name itself is not a problem.
The border_mode argument is not available anymore; the equivalent is padding, with the options valid or same (the old full mode has no direct equivalent).
Try both to see which one fits the output shapes and allows the code to work.
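As for the 4th integer the question asks about: in the very old Keras 0.x API, as far as I can tell, the signature was Convolution2D(nb_filter, stack_size, nb_row, nb_col), so the extra 3 was the number of input channels. Modern Conv2D infers the channel count from the input tensor, so a rough translation sketch would be:

# Old Keras 0.x call (input channels passed explicitly, 'full' border mode):
# model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
# Closest modern equivalent ('same' being the nearest remaining padding option):
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same'))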

Merging tensors based on a key

I am dealing with a problem in which the network design requires merging the output of one part of the network with tabular input data (the other input) based on a key, and then training the network further on the merged data. It appeared that there is no way to merge two tensors based on a key, so I thought of converting the tensor to numpy, then to a pandas DataFrame, merging there, converting the merged data back to a tensor, and using it further in the network. Below is the code for it:
def build_convnet(shape=(112, 112, 1)):
    from keras.layers import Conv2D, BatchNormalization, MaxPool2D, GlobalMaxPool2D
    momentum = .9
    model = keras.Sequential()
    model.add(Conv2D(64, (3,3), input_shape=shape,
                     padding='same', activation='relu'))
    model.add(Conv2D(64, (3,3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    model.add(MaxPool2D())
    model.add(Conv2D(128, (3,3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3,3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    model.add(MaxPool2D())
    model.add(Conv2D(256, (3,3), padding='same', activation='relu'))
    model.add(Conv2D(256, (3,3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    model.add(MaxPool2D())
    model.add(Conv2D(512, (3,3), padding='same', activation='relu'))
    model.add(Conv2D(512, (3,3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    # flatten...
    model.add(GlobalMaxPool2D())
    return model

def action_model(shape=(3, 112, 112, 1)):
    from keras.layers import TimeDistributed, GRU, Dense, Dropout
    # create our convnet with (112, 112, 1) input shape
    convnet = build_convnet(shape[1:])
    # then create our final model
    model = keras.Sequential()
    # add the convnet with (3, 112, 112, 1) shape
    model.add(TimeDistributed(convnet, input_shape=shape))
    # here, you can also use GRU or LSTM
    model.add(GRU(64))
    # and finally, we make a decision network
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(4, activation='relu'))
    return model

# create the tab_data and cnn_gru models
tab_dt = keras.Input(shape=(trainX.shape[1],))
cnn_gru = action_model(X_train.shape[1:])

# converting tensor to numpy array and merging with tabular data on a key (Patient)
cnn_gru_np = cnn_gru.output.eval()
cnn_gru_pd = pd.DataFrame(cnn_gru_np, columns=["V1", "V2", "V3", "V4"])
cnn_gru_pd["Patient"] = train_p
tab_dt_np = tab_dt.eval()
tab_dt_pd = pd.DataFrame(tab_dt_np, columns=["Weeks", "Percent", "Age", "Sex_Male", "SmokingStatus_Ex-smoker", "SmokingStatus_Never smoked"])
tab_dt_pd["Patient"] = train_p.numpy()
combinedInput_pd = pd.merge(tab_dt_pd, cnn_gru_pd, on=["Patient"], how="left")
combinedInput_pd.drop(["Patient"], axis=1, inplace=True)
combinedInput_np = np.array(combinedInput_pd)
combinedInput = tf.convert_to_tensor(combinedInput_np)

# begin our regression head
x = Dense(8, activation="relu")(combinedInput)
x = Dense(1, activation="relu")(x)
model = Model(inputs=[tab_dt, cnn_gru.input], outputs=x)
I am getting the below error for the eval function in the line cnn_gru_np = cnn_gru.output.eval():
ValueError: Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`
Please help me figure out what is going wrong here.
The reason you're getting a ValueError is that the output of a keras model isn't an eager tensor, and thus does not support eval like that.
Just try
some_model = keras.Sequential([keras.layers.Dense(10, input_shape=(5,))])
print(type(some_model.output))
print(type(tf.zeros((2,))))
some_model.output.eval()
# <class 'tensorflow.python.framework.ops.Tensor'>
# <class 'tensorflow.python.framework.ops.EagerTensor'>
# ValueError
However, there is a bigger problem with your approach: there is no connected computation graph from your model's inputs to your model's outputs, because none of the pandas operations are TensorFlow ops. That is, even if you were able to use eager tensors, you still wouldn't be able to train your model with automatic differentiation.
You're going to have to specify your entire model in TensorFlow, I'm afraid.
Maybe you could do the data processing before giving it as input to the model? Then you only need split/concat ops to put everything together.
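To illustrate that last suggestion, here is a minimal sketch, assuming a hypothetical tabular DataFrame tab_df keyed by a "Patient" column (tab_df is a stand-in for the question's data; action_model and train_p are reused from the code above): align the tabular rows to the image samples with pandas before training, then build a two-input model whose merge is a plain Concatenate on already-aligned tensors, so the graph stays connected and trainable.

import pandas as pd
from tensorflow.keras import layers, Model

# align tabular rows to the order of the image samples by key, outside the model
sample_keys = pd.DataFrame({"Patient": train_p})
tab_aligned = sample_keys.merge(tab_df, on="Patient", how="left")
tab_np = tab_aligned.drop(columns=["Patient"]).to_numpy("float32")

# two aligned inputs; the merge inside the model is now a plain Concatenate
img_in = layers.Input(shape=(3, 112, 112, 1))
tab_in = layers.Input(shape=(tab_np.shape[1],))
cnn_gru_out = action_model((3, 112, 112, 1))(img_in)
merged = layers.Concatenate()([cnn_gru_out, tab_in])
x = layers.Dense(8, activation="relu")(merged)
out = layers.Dense(1)(x)
model = Model(inputs=[img_in, tab_in], outputs=out)
model.compile(optimizer="adam", loss="mse")
# model.fit([X_train, tab_np], y_train, ...)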

How to customise CNN layers with TensorFlow 2, feed new inputs at the Dense layers of a CNN [duplicate]

I have 1D sequences which I want to use as input to a Keras VGG classification model, split into x_train and x_test. For each sequence, I also have custom features stored in feats_train and feats_test which I do not want to feed to the convolutional layers, but to the first fully connected layer.
A complete train or test sample thus consists of a 1D sequence plus n floating-point features.
What is the best way to feed the custom features directly into the first fully connected layer? I thought about concatenating the input sequence and the custom features, but I do not know how to keep them separate inside the model. Are there any other options?
The code without the custom features:
x_train, x_test, y_train, y_test, feats_train, feats_test = load_balanced_datasets()
model = Sequential()
model.add(Conv1D(10, 5, activation='relu', input_shape=(timesteps, 1)))
model.add(Conv1D(10, 5, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.5, seed=789))
model.add(Conv1D(5, 6, activation='relu'))
model.add(Conv1D(5, 6, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.5, seed=789))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5, seed=789))
model.add(Dense(2, activation='softmax'))
model.compile(loss='logcosh', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=20, shuffle=False, verbose=1)
y_pred = model.predict(x_test)
The Sequential model is not very flexible; you should look into the functional API.
I would try something like this:
from keras.layers import (Conv1D, MaxPool1D, Dropout, Flatten, Dense,
                          Input, concatenate)
from keras.models import Model, Sequential

timesteps = 50
n = 5

def network():
    sequence = Input(shape=(timesteps, 1), name='Sequence')
    features = Input(shape=(n,), name='Features')

    conv = Sequential()
    conv.add(Conv1D(10, 5, activation='relu', input_shape=(timesteps, 1)))
    conv.add(Conv1D(10, 5, activation='relu'))
    conv.add(MaxPool1D(2))
    conv.add(Dropout(0.5, seed=789))
    conv.add(Conv1D(5, 6, activation='relu'))
    conv.add(Conv1D(5, 6, activation='relu'))
    conv.add(MaxPool1D(2))
    conv.add(Dropout(0.5, seed=789))
    conv.add(Flatten())
    part1 = conv(sequence)

    merged = concatenate([part1, features])

    final = Dense(512, activation='relu')(merged)
    final = Dropout(0.5, seed=789)(final)
    final = Dense(2, activation='softmax')(final)

    model = Model(inputs=[sequence, features], outputs=[final])
    model.compile(loss='logcosh', optimizer='adam', metrics=['accuracy'])
    return model

m = network()
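Since the two Input layers are named, training can then pass the inputs either as a list or as a dict keyed by those names. A short usage sketch with the arrays from the question (the batch size and epoch count are placeholders):

m.fit({'Sequence': x_train, 'Features': feats_train}, y_train,
      batch_size=32, epochs=20)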

How to convert Tensor objects into numpy arrays?

I have built and trained a CNN, and I want to get the weights of the first dense layer as a numpy array. After I trained the model, I loaded it using this code:
f = Path("model_structure.json")
model_structure = f.read_text()
model_wieghts = model_from_json(model_structure)
model_wieghts.load_weights("model_weights.h5")
In order to get the weights of the first dense layer I used:
wieghts_tf = model_wieghts.layers[9].output
wieghts_tf has this value:
<tf.Tensor 'dense_1/Relu:0' shape=(?, 496) dtype=float32>
The question is, I want to convert the type of wieghts_tf from tensor to numpy array. So I created a session and used the eval() function to do so, as shown below:
sess = tf.Session()
with sess.as_default():
    vector = wieghts_tf.eval()
but I'm getting this error:
InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_1_input' with dtype float and shape [?,180,180,3]
How can I solve it?
Here is the code of the CNN model:
# creating neural network
model = Sequential()
conv1_2d = model.add(Conv2D(180, (3, 3), padding='same', input_shape=(180, 180, 3), activation="relu"))  # 180 is the number of filters
conv2_2d = model.add(Conv2D(180, (3, 3), activation="relu"))
max_pool1 = model.add(MaxPooling2D(pool_size=(3, 3)))
drop_1 = model.add(Dropout(0.25))
conv3_2d = model.add(Conv2D(360, (3, 3), padding='same', activation="relu"))
conv4_2d = model.add(Conv2D(360, (3, 3), activation="relu"))
max_pool2 = model.add(MaxPooling2D(pool_size=(3, 3)))
drop_2 = model.add(Dropout(0.25))
flat = model.add(Flatten())
dense_1 = model.add(Dense(496, activation="relu"))
drop_3 = model.add(Dropout(0.5))
dense_2 = dense_layer = model.add(Dense(376, activation="softmax"))

model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

model.fit(
    train_data,
    train_label,
    batch_size=32,
    epochs=40,
    verbose=2,
    validation_split=0.1,
    shuffle=True)

# Save neural network structure
model_structure = model.to_json()
f = Path("model_structure.json")
f.write_text(model_structure)

# Save neural network's trained weights
model.save_weights("model_weights.h5")
Found the solution:
x = np.frombuffer(layer.convolution.weights.float16Value, dtype=np.float16)
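It is worth noting that model_wieghts.layers[9].output is the layer's symbolic output tensor (its activations), not its weights, which is why evaluating it demanded a feed value for the input placeholder. For a plain Keras model, get_weights() returns the weights as numpy arrays directly, with no session or eval() needed; a minimal sketch (layer index 9 taken from the question's model, where it is the Dense(496) layer):

dense_layer = model_wieghts.layers[9]
kernel, bias = dense_layer.get_weights()  # both are numpy ndarrays
print(kernel.shape, bias.shape)           # kernel: (input_dim, 496), bias: (496,)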

How to do early stopping with tensorflow.models.Sequential()?

Using a sequential model generated like this:
def generate_model():
    model = Sequential()
    model.add(Conv1D(64, kernel_size=10, strides=1,
                     activation='relu', padding='same',
                     input_shape=(MAXLENGTH, NAMESPACELENGTH)))
    model.add(MaxPooling1D(pool_size=4, strides=2))
    model.add(Conv1D(32, 3, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=4))
    model.add(Flatten())
    model.add(Dense(10, activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mean_squared_error',
                  optimizer='adam', metrics=['mean_squared_error'])
    return model
I want to do K-fold cross-validated modeling, so I train K models in a loop:
models = []
for ndx_train, ndx_val in kfold.split(X, y):
    model = generate_model()
    N_train = len(ndx_train)
    X_batch = X[ndx_train]
    y_batch = y[ndx_train]
    model.fit(X_batch, y_batch, epochs=100, verbose=1, steps_per_epoch=10,
              validation_data=(X[ndx_val], y[ndx_val]), validation_steps=100)
    models.append(model)
Now, I can see when I want each model to stop by looking at the output, i.e. when the validation error increases again. Is it possible to do that easily with pure tf and with this higher-level API setup? There are some suggestions along these lines using tflearn here.
By using the EarlyStopping callback:
from tensorflow.keras.callbacks import EarlyStopping

callbacks = [
    EarlyStopping(monitor='val_mean_squared_error', patience=2, verbose=1),
]

model.fit(..., callbacks=callbacks)
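Wired into the k-fold loop from the question, that would look roughly like this (patience=2 and restore_best_weights=True are reasonable defaults here, not values from the source):

from tensorflow.keras.callbacks import EarlyStopping

models = []
for ndx_train, ndx_val in kfold.split(X, y):
    model = generate_model()
    early_stop = EarlyStopping(monitor='val_mean_squared_error',
                               patience=2, restore_best_weights=True,
                               verbose=1)
    # stops training once the validation MSE stops improving
    model.fit(X[ndx_train], y[ndx_train], epochs=100, verbose=1,
              validation_data=(X[ndx_val], y[ndx_val]),
              callbacks=[early_stop])
    models.append(model)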