I am working on a sample neural network with KFold cross-validation using TensorFlow 2.4.1 and sklearn. Unfortunately, I am not able to save the model.
def my_model(self):
    inputs = keras.Input(shape=(48, 48, 3))
    x = layers.Conv2D(filters=4, kernel_size=self.k_size, padding='same', activation="relu")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool2D()(x)
    x = layers.Flatten()(x)
    output = layers.Dense(10, activation='softmax')(x)
    model = keras.Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam',
                  loss=[keras.losses.SparseCategoricalCrossentropy(from_logits=True)],
                  metrics=['accuracy'])
    return model
def train_model(self):
    try:
        os.mkdir('model/saved_models')
    except OSError:
        pass
    try:
        os.mkdir('model/saved_graphs')
    except OSError:
        pass
    kf = KFold(n_splits=3)
    for train_index, test_index in kf.split(self.x_train):
        x_train, x_test = self.x_train[train_index], self.x_train[test_index]
        y_train, y_test = self.y_train[train_index], self.y_train[test_index]
        model = self.my_model()
        print(model.summary())
        trained_model = model.fit(x_train, y_train, epochs=self.epochs, steps_per_epoch=10, verbose=2)
        trained_model = trained_model.history
        print('Model evaluation', model.evaluate(x_test, y_test, verbose=2))
    trained_model.save(f'model/saved_models/dummy_model_{date}')
    return trained_model
I am getting the following error:
trained_model.save(f'model/saved_models/dummy_model_{date}')
AttributeError: 'dict' object has no attribute 'save'
I cannot think of a way to take the trained model out of the for loop, and that is the only cause I can think of for this problem.
Can anybody suggest how to solve this issue? Or is there any other way to build an ANN with KFold?
Thanks.
Yeah, your code has a typo:
trained_model = trained_model.history  # this is your training history, so trained_model is a dictionary
model.save(f'model/saved_models/dummy_model_{date}')  # this is how you save the actual model
I need to run GridSearchCV on a Keras model but keep running into the following error:
TypeError: Only integers, slices (:), ellipsis (...), tf.newaxis (None) and scalar tf.int32/tf.int64 tensors are valid indices, got array([20000, 20001, 20002, ..., 59997, 59998, 59999])
on the line grid_result = grid.fit(x_train, y_train).
The code to run the GridSearchCV is as follows:
batch_size = 128
epochs = 20
model_CV = KerasClassifier(build_fn=create_model,epochs=epochs,batch_size=batch_size, verbose=0)
# define the grid search parameters
init_mode = ['uniform', 'normal', 'he_normal','he_uniform']
param_grid = dict(init_mode=init_mode)
grid = GridSearchCV(estimator=model_CV,param_grid=param_grid, cv=3)
grid_result = grid.fit(x_train, y_train)
The create_model function used above:
def create_model(init_mode='uniform'):
    model = Sequential()
    model.add(Dense(64, kernel_initializer=init_mode,
                    activation=tf.nn.relu, input_dim=784))
    model.add(Dropout(rate=0.5))
    model.add(Dense(64, kernel_initializer=init_mode,
                    activation=tf.nn.relu))
    model.add(Dense(10, kernel_initializer=init_mode, activation=tf.nn.softmax))
    # compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    return model
Data Source
mnist = keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
Data Preprocessing
flatten = tf.keras.layers.Flatten(input_shape=[28,28])
x_train = flatten(x_train)
x_train = x_train / 255
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, num_classes = num_classes)
I tried changing y_train by flattening it, and also by not running to_categorical on it, but I still run into the same issue.
Is the problem with x_train or y_train, and how can I fix it? Thank you for any help provided.
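For what it's worth, the error text suggests x_train is the culprit: after flatten(x_train) it is a tf.Tensor, and scikit-learn's cross-validation splitter tries to index it with a NumPy index array, which TensorFlow tensors do not accept. A minimal sketch of a NumPy-only preprocessing step, assuming that diagnosis:
# Flatten with NumPy instead of a Keras layer so x_train stays a
# NumPy array that sklearn's KFold indexing can handle.
x_train = x_train.reshape(-1, 784).astype('float32') / 255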
I am training a deepfake image detector using TensorFlow, but the validation accuracy is stuck at 67%. I have tried different optimizers, but it is not improving and only floats around the same score.
Here are my steps for creating the model.
Import the data from the image folder.
Create an ImageDataGenerator object to do some augmentation:
datagen = ImageDataGenerator(
horizontal_flip=True,
validation_split=0.2,
rescale=1./255,
)
Creating the model
# image dimensions: 299, 299, 3
input_layer = Input(shape = (image_dimensions['height'], image_dimensions['width'], image_dimensions['channels']))
base_model = keras.applications.EfficientNetB5(
weights='imagenet',
input_shape=(image_dimensions['height'], image_dimensions['width'], image_dimensions['channels']),
include_top=False)
base_model.trainable = False
x = base_model(input_layer, training=False)
# Add pooling layer or flatten layer
y = GlobalAveragePooling2D()(x)
y = Dense(512, activation='relu')(y)
y = Dropout(0.4)(y)
y = Dense(256)(y)
# Add final dense layer
output_layer = Dense(1, activation='sigmoid')(y)
model = Model(inputs=input_layer, outputs=output_layer)
Training
efficientNet = EfficientNet(learning_rate = 0.001)
efficientNet.summary()
history = efficientNet.fit(datagen.flow(X_train, y_train, batch_size=64, subset='training'),
epochs=10,
validation_data=datagen.flow(X_train, y_train, batch_size=64, subset='validation'))
Result
Here is the result of the model training (the training-log screenshot is not reproduced here): validation accuracy plateaus around 67%.
Is there any way I can fix this problem?
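One detail worth checking, assuming base_model comes from tf.keras.applications: in recent TensorFlow releases, the EfficientNet models include their input preprocessing inside the network and expect raw pixel values in [0, 255], so rescale=1./255 feeds the frozen backbone inputs far outside the range it was trained on. A minimal sketch of the change:
# EfficientNet (tf.keras.applications) already rescales internally,
# so the generator should pass raw [0, 255] pixels through.
datagen = ImageDataGenerator(
    horizontal_flip=True,
    validation_split=0.2,
)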
I am trying to train a graph convolutional neural network using the StellarGraph library. I would like to run this example https://stellargraph.readthedocs.io/en/stable/demos/graph-classification/gcn-supervised-graph-classification.html
but without the N-fold cross-validation, by providing my own training, validation, and test sets. This is the code I am using (taken from this post):
generator = PaddedGraphGenerator(graphs=graphs)
train_gen = generator.flow([x for x in range(0, len(graphs_train))],
targets=graphs_train_labels,
batch_size=35)
test_gen = generator.flow([x for x in range(len(graphs_train),len(graphs_train) + len(graphs_test))],
targets=graphs_test_labels,
batch_size=35)
# Stopping criterion
es = EarlyStopping(monitor="val_loss",
min_delta=0,
patience=20,
restore_best_weights=True)
# Model definition
gc_model = GCNSupervisedGraphClassification(layer_sizes=[64, 64],
activations=["relu", "relu"],
generator=generator,
dropout=0.5)
x_inp, x_out = gc_model.in_out_tensors()
predictions = Dense(units=32, activation="relu")(x_out)
predictions = Dense(units=16, activation="relu")(predictions)
predictions = Dense(units=1, activation="sigmoid")(predictions)
# Creating Keras model and preparing it for training
model = Model(inputs=x_inp, outputs=predictions)
model.compile(optimizer=Adam(0.001), loss=binary_crossentropy, metrics=["acc"])
# GNN Training
history = model.fit(train_gen, epochs=10, validation_data=test_gen, verbose=1)
model.fit(x=graphs_train,
y=graphs_train_labels,
epochs=10,
verbose=1,
callbacks=[es])
# Calculate performance on the validation data
test_metrics = model.evaluate(valid_gen, verbose=1)
valid_acc = test_metrics[model.metrics_names.index("acc")]
print(f"Test Accuracy model = {valid_acc}")
But at the end I am getting this error:
ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'stellargraph.core.graph.StellarGraph'>"}), <class 'numpy.ndarray'>
What am I missing here? Is it because of the way I have created the graphs? In my case, graphs is a list containing StellarGraph objects.
Problem solved. I was calling
model.fit(x=graphs_train,
y=graphs_train_labels,
epochs=10,
verbose=1,
callbacks=[es])
after the line
history = model.fit(train_gen, epochs=10, validation_data=test_gen, verbose=1)
Removing the second fit call fixed it: the model must be fed through the PaddedGraphGenerator flows, not raw StellarGraph objects.
I am building a model to classify sequence classes. First I built the model using the Keras API. As we know, the Keras API wraps TensorFlow functions, but when I converted the Keras code to the TensorFlow API, I found that the two frameworks give different results. Below is the key code.
TensorFlow code
x = tf.placeholder(tf.int32, shape=[None, time_steps], name='x_input')
y = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_label')
# Define the network structure
def rnn_model(x):
    x = tf.one_hot(x, api_vob_size)
    rnn_cell_fw = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
    rnn_cell_bw = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
    # Feed the input into the RNN to get the outputs and intermediate states;
    # each output has shape [batch_size, time_steps, rnn_size]
    outputs, states = tf.nn.bidirectional_dynamic_rnn(rnn_cell_fw, rnn_cell_bw, x, dtype=tf.float32)
    # Take the output of the last time step, shape [batch_size, 2*rnn_size]
    outputs1 = tf.concat(outputs, 2)
    output = tf.transpose(outputs1, [1, 0, 2])[-1]
    # Fully connected layer, final output shape [batch_size, num_classes]
    fc_w = tf.Variable(tf.random_normal([2*rnn_size, num_classes]))
    fc_b = tf.Variable(tf.random_normal([num_classes]))
    return tf.matmul(output, fc_w) + fc_b
# Build the network
logits = rnn_model(x)
prediction = tf.nn.softmax(logits)
# Define the loss function and optimizer
loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits, name='cross_entropy'))
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_op = optimizer.minimize(loss_op, name='optimizer_min')
# Keras API
model = Sequential()
model.add(Bidirectional(LSTM(units=150), merge_mode='concat'))
model.add(Dense(9, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=64)
So why do the two code blocks give different results? Thank you for any answers!
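Two visible differences between the snippets, for what it's worth: the TensorFlow version trains with sigmoid_cross_entropy_with_logits (independent per-class sigmoids), while the Keras version uses categorical_crossentropy on a softmax output; and taking the last time step of the concatenated bidirectional outputs gives the backward cell's output after it has seen only one token, whereas Keras's Bidirectional(LSTM(...)) concatenates each direction's own final output. A sketch of a loss that would match the Keras side:
# Softmax cross-entropy is the TF1 equivalent of Keras's
# categorical_crossentropy; `logits` are the raw pre-softmax scores.
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))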
So, I need to concatenate an input to the flattened layer before going into the dense layer.
I'm using Keras with TF as the backend.
model.add(Flatten())
aux_input = Input(shape=(1, ))
model.add(Concatenate([model, aux_input]))
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
I have a scenario like this: X_train, y_train, aux_train. The shapes of y_train and aux_train are the same, (1,): each image has a ground truth and an aux_input.
How do I add this aux_input to the model while doing model.fit?
As suggested in the answers, I changed my model to the functional API. However, now I get the following error:
ValueError: Layer dense_1 was called with an input that isn't a symbolic tensor. Received type: <class 'keras.layers.merge.Concatenate'>. Full input: [<keras.layers.merge.Concatenate object at ...>]. All inputs to the layer should be tensors.
Here's the code for that part.
flatten = Flatten()(drop_5)
aux_rand = Input(shape=self.aux_shape)
concat = Concatenate([flatten, aux_input])
fc1 = Dense(512, kernel_regularizer=regularizers.l2(weight_decay))(concat)
Shape of aux input
aux_shape = (1,)
And then calling the model as follows:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
aux_rand = np.random.rand(y_train.shape[0])
model_inst = cifar10vgg()
x_train_input = Input(shape=(32,32,3))
aux_input = Input(shape=(1,))
model = Model(inputs=[x_train_input, aux_input], output=model_inst.build_model())
model.fit(x=[x_train, aux_rand], y=y_train, batch_size=batch_size, steps_per_epoch=x_train.shape[0] // batch_size,
epochs=maxepoches, validation_data=(x_test, y_test),
callbacks=[reduce_lr, tensorboard], verbose=2)
model_inst.build_model() returns Activation('softmax')(fc2), which is the output to be fed into the Model (as far as I understood).
As I see from your code, you implemented the model with the Sequential API, which is not a good option in this case. If you have auxiliary inputs, the best way to implement such a feature is to use the functional API.
Here is an example from the Keras website:
import keras
from keras.layers import Input, Embedding, LSTM, Dense
from keras.models import Model
main_input = Input(shape=(100,), dtype='int32', name='main_input')
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)
lstm_out = LSTM(32)(x)
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)
auxiliary_input = Input(shape=(5,), name='aux_input')
x = keras.layers.concatenate([lstm_out, auxiliary_input])
x = Dense(64, activation='relu')(x)
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output, auxiliary_output])
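To actually train such a model, the inputs and targets are passed as lists matching the order given to Model(...). A minimal sketch with illustrative array names:
# Compile with a loss for the outputs, then feed inputs/targets as lists
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
model.fit([main_data, auxiliary_data], [main_labels, auxiliary_labels],
          epochs=10, batch_size=32)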
Based on the description, I think the following code can give you some intuition:
x1 = Input(shape=(32, 32, 3))
flatten1 = Flatten()(x1)
x2 = Input(shape=(244, 244, 3))
vgg = VGG19(weights='imagenet', include_top=False)(x2)
flatten2 = Flatten()(vgg)
concat = Concatenate()([flatten1, flatten2])
d = Dense(10)(concat)
model = Model(inputs=[x1, x2], outputs=[d])
model.compile('adam', 'categorical_crossentropy')
model.fit(x=[x_train1, x_train2], y=y_labels)
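As a side note on the updated snippet in the question: the "isn't a symbolic tensor" error comes from instantiating Concatenate with the tensor list instead of calling the layer on it (there is also an aux_rand/aux_input name mismatch). A corrected sketch, reusing the question's drop_5, aux_shape, and weight_decay:
flatten = Flatten()(drop_5)
aux_rand = Input(shape=self.aux_shape)
# Instantiate the layer first, then call it on the list of tensors
concat = Concatenate()([flatten, aux_rand])
fc1 = Dense(512, kernel_regularizer=regularizers.l2(weight_decay))(concat)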