Is it possible to use Keras Tuner to tune a NN using a time series split, similar to sklearn.model_selection.TimeSeriesSplit in sklearn?
For example, consider this sample hypermodel class from https://towardsdatascience.com/hyperparameter-tuning-with-keras-tuner-283474fbfbe:
from kerastuner import HyperModel, RandomSearch
from tensorflow.keras import Sequential, layers

class SampleModel(HyperModel):
    def __init__(self, input_shape):
        self.input_shape = input_shape

    def build(self, hp):
        model = Sequential()
        model.add(
            layers.Dense(
                units=hp.Int('units', 8, 64, 4, default=8),
                activation=hp.Choice(
                    'dense_activation',
                    values=['relu', 'tanh', 'sigmoid'],
                    default='relu'),
                input_shape=self.input_shape
            )
        )
        model.add(layers.Dense(1))
        model.compile(
            optimizer='rmsprop', loss='mse', metrics=['mse']
        )
        return model
And the tuner:
tuner_rs = RandomSearch(
    hypermodel,
    objective='mse',
    seed=42,
    max_trials=10,
    executions_per_trial=2)

tuner_rs.search(x_train_scaled, y_train, epochs=10, validation_split=0.2, verbose=0)
So, instead of validation_split=0.2 in the line above, is it possible to do the following?
from sklearn.model_selection import TimeSeriesSplit
#defining a time series split object
tscv = TimeSeriesSplit(n_splits = 5)
#using that in Keras Tuner
tuner_rs.search(x_train, y_train, epochs=10, validation_split=tscv, verbose=0)
I solved it in this way:
First, I instantiated a class that performs a blocking time series split. I found that it might be better to use this kind of split rather than sklearn's TimeSeriesSplit, because the model never trains on instances that contain already-seen data. As you can see from the picture, if the number of splits is 5, BTSS divides your training data into 5 parts, with only the validation data in common across the splits. (Since Stack Overflow doesn't allow me to upload images, I'll post a reference link: https://hub.packtpub.com/cross-validation-strategies-for-time-series-forecasting-tutorial/)
import numpy as np

class BlockingTimeSeriesSplit():
    def __init__(self, n_splits):
        self.n_splits = n_splits

    def get_n_splits(self, X, y, groups):
        return self.n_splits

    def split(self, X, y=None, groups=None):
        n_samples = len(X)
        k_fold_size = n_samples // self.n_splits
        indices = np.arange(n_samples)
        margin = 0
        for i in range(self.n_splits):
            start = i * k_fold_size
            stop = start + k_fold_size
            # the first 80% of each block is used for training, the rest for validation
            mid = int(0.8 * (stop - start)) + start
            yield indices[start: mid], indices[mid + margin: stop]
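A quick sanity check of the fold boundaries (illustrative only, on 100 dummy samples):

for fold, (train_idx, val_idx) in enumerate(BlockingTimeSeriesSplit(n_splits=5).split(np.arange(100))):
    # each fold covers a disjoint block; the last 20% of the block is the validation slice
    print(fold, train_idx[[0, -1]], val_idx[[0, -1]])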
Then you will proceed by creating your own model:
def build_model(hp):
    pass
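For example, a minimal build_model sketch (the layer sizes and hyperparameter ranges here are placeholders; the only real constraint is that the compiled metrics match what the CVTuner below unpacks from evaluate(), i.e. accuracy and AUC):

import tensorflow as tf

def build_model(hp):
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(
        units=hp.Int('units', 16, 128, step=16),
        activation=hp.Choice('activation', ['relu', 'tanh'])))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        # evaluate() then returns [loss, accuracy, auc], matching the unpacking in run_trial below
        metrics=['accuracy', tf.keras.metrics.AUC(name='auc')])
    return model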
Finally, you can create your CVTuner as a class that uses BlockingTimeSeriesSplit:
class CVTuner(kt.engine.tuner.Tuner):
    def run_trial(self, trial, x, y, *args, **kwargs):
        cv = BlockingTimeSeriesSplit(n_splits=5)
        val_accuracy_list = []
        batch_size = trial.hyperparameters.Int('batch_size', 8, 64, step=8)
        epochs = trial.hyperparameters.Int('epochs', 10, 100, step=10)
        for train_indices, test_indices in cv.split(x):
            x_train, x_test = x[train_indices], x[test_indices]
            y_train, y_test = y[train_indices], y[test_indices]
            model = self.hypermodel.build(trial.hyperparameters)
            model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
            # assumes the compiled model reports accuracy and AUC as metrics
            val_loss, val_accuracy, val_auc = model.evaluate(x_test, y_test)
            val_accuracy_list.append(val_accuracy)
        self.oracle.update_trial(trial.trial_id, {'val_accuracy': np.mean(val_accuracy_list)})
        self.save_model(trial.trial_id, model)
tuner = CVTuner(
    oracle=kt.oracles.BayesianOptimization(objective='val_accuracy', max_trials=1),
    hypermodel=build_model)

stop_early = tf.keras.callbacks.EarlyStopping(monitor='accuracy', patience=10)

tuner.search(X, Y, callbacks=[stop_early])

best_model = tuner.get_best_models()[0]
best_model.summary()
best_model.evaluate(x_out_of_sample, y_out_of_sample)
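If you also need the winning hyperparameter values themselves (including the tuned batch_size and epochs), the standard Keras Tuner accessor should work here as well:

best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)  # e.g. {'units': ..., 'batch_size': ..., 'epochs': ...}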
Related
I'm working on a CNN classification problem. I used Keras and a pre-trained model. Now I want to evaluate my model and need precision, recall and F1 score. When I use sklearn.metrics classification_report I get the above error. I know where the numbers come from: the first is the length of my test dataset in batches and the second is the number of actual samples (predictions) in it. However, I don't know how to "convert" them.
See my code down below:
# load train_ds
train_ds = tf.keras.utils.image_dataset_from_directory(
    directory='/gdrive/My Drive/Flies_dt/224x224',
    image_size=(224, 224),
    validation_split=0.40,
    subset="training",
    seed=123,
    shuffle=True)

# load val_ds
val_ds = tf.keras.utils.image_dataset_from_directory(
    directory='/gdrive/My Drive/Flies_dt/224x224',
    image_size=(224, 224),
    validation_split=0.40,
    subset="validation",
    seed=123,
    shuffle=True)

# move some batches of val_ds to test_ds
test_ds = val_ds.take((1 * len(val_ds)) // 2)
print('test_ds =', len(test_ds))

val_ds = val_ds.skip((1 * len(val_ds)) // 2)
print('val_ds =', len(val_ds))  # test_ds = 18, val_ds = 18
# Load Model
base_model = keras.applications.vgg19.VGG19(
    include_top=False,
    weights='imagenet',
    input_shape=(224, 224, 3)
)

# Freeze base_model
base_model.trainable = False

inputs = keras.Input(shape=(224, 224, 3))
x = data_augmentation(inputs)  # apply data augmentation

# Preprocessing
x = tf.keras.applications.vgg19.preprocess_input(x)

# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs, outputs)

model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer="Adam",
    metrics=['acc']
)

model.fit(train_ds, epochs=8, validation_data=val_ds, callbacks=[tensorboard_callback])

# Unfreeze the base_model. Note that it keeps running in inference mode
# since we passed `training=False` when calling it. This means that
# the batchnorm layers will not update their batch statistics.
# This prevents the batchnorm layers from undoing all the training
# we've done so far.
base_model.trainable = True
model.summary()

model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.000001),  # Low learning rate
    loss="sparse_categorical_crossentropy",
    metrics=['acc']
)

model.fit(train_ds, epochs=5, validation_data=val_ds)

# Evaluate
from sklearn.metrics import classification_report

y_pred = model.predict(test_ds, batch_size=64, verbose=1)
y_pred_bool = np.argmax(y_pred, axis=1)

print(classification_report(test_ds, y_pred_bool))
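For reference, classification_report expects per-sample labels rather than a batched dataset, so the labels would likely need to be pulled out of test_ds first. A sketch (assuming test_ds yields (image, label) batches and that its iteration order matches the predict call, which is safer with shuffle=False):

import numpy as np
from sklearn.metrics import classification_report

# collect the integer class labels from the batched dataset
y_true = np.concatenate([labels.numpy() for images, labels in test_ds], axis=0)

y_pred = model.predict(test_ds, verbose=1)
y_pred_bool = np.argmax(y_pred, axis=1)

print(classification_report(y_true, y_pred_bool))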
I also tried something like this, but I'm not sure if this gives me the correct values for multiclass classification.
from keras import backend as K

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc', f1_m, precision_m, recall_m])

# fit the model
history = model.fit(Xtrain, ytrain, validation_split=0.3, epochs=10, verbose=0)

# evaluate the model
loss, accuracy, f1_score, precision, recall = model.evaluate(Xtest, ytest, verbose=0)
This is a lot, sorry. I hope somebody can help.
I am comparing two simple single-dense-layer regressors. The only difference between them is that in the second one I pass indices of the data to the call function rather than the data itself, and use the indices to retrieve the data for training.
class DenseRegressorV1(tf.keras.Model):
    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super(DenseRegressorV1, self).__init__(*args, **kwargs)
        self.dense_layer = layers.Dense(units=1, name="dense")

    def call(self, inputs):
        logits = self.dense_layer(inputs)
        return logits


class DenseRegressorV2(tf.keras.Model):
    def __init__(
        self,
        data,
        *args,
        **kwargs,
    ):
        super(DenseRegressorV2, self).__init__(*args, **kwargs)
        self.data = data
        self.dense_layer = layers.Dense(units=1, name="dense")

    def call(self, inputs_idx):
        # look up the actual input rows by index
        inputs = tf.gather(self.data, inputs_idx)
        logits = self.dense_layer(inputs)
        return logits
The input data is 3D. The target y is a simple linear sum of x with some noise, which should be easy to predict. The data is generated as below:
x_train_3D = tf.random.uniform((80000, 80, 20))
y_train_3D = tf.expand_dims(tf.reduce_sum(x_train_3D, axis=2), axis=2) + 2 * tf.random.uniform((80000, 80, 1))
x_train_3D_idx = np.array(range(0, len(x_train_3D)))
The unexpected behavior is that V1 seems to train correctly and its val_loss converges to a very small number, but V2 does not, although I think they should perform the same.
I use the code below for training:
def train_model(model, x_train, y_train, num_epochs, batch_size, learning_rate):
    optimizer = keras.optimizers.Adam(learning_rate)
    tf.keras.metrics.RootMeanSquaredError()
    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.mse,
        metrics=['mse'],
    )
    early_stopping = keras.callbacks.EarlyStopping(
        monitor="val_loss", patience=20, restore_best_weights=True
    )
    model_history = model.fit(
        x=x_train,
        y=y_train,
        epochs=num_epochs,
        batch_size=batch_size,
        validation_split=0.18,
        callbacks=[early_stopping]
    )
    return model_history
dr_model_v1 = DenseRegressorV1()
dr_model_v2 = DenseRegressorV2(x_train_3D)
train_model(dr_model_v1, x_train_3D, y_train_3D, num_epochs=500, batch_size=1000, learning_rate=0.01)
train_model(dr_model_v2, x_train_3D_idx, y_train_3D, num_epochs=500, batch_size=1000, learning_rate=0.01)
So my questions are:
What causes the difference in training, and how can we fix the V2 model?
If I flatten the training data into 2D before feeding it into the model, I get the expected results for the V2 model. Why does the shape make a difference here?
This is how I transformed the data to 2D for training with V2:
x_train_2D = tf.reshape(x_train_3D, (x_train_3D.get_shape()[0] * x_train_3D.get_shape()[1], x_train_3D.get_shape()[2]))
y_train_2D = tf.reshape(y_train_3D, (y_train_3D.get_shape()[0] * y_train_3D.get_shape()[1], y_train_3D.get_shape()[2]))
I wanted to fit a simple LSTM model to perform binary classification on multivariate time series data. Since my data is severely imbalanced, I have integrated the class_weight argument from sklearn into my model. However, I get a pretty high loss value, and it does not decrease with each epoch. My F1 score is 0.018, which is extremely low as well. I appreciate your suggestions!
Sample data:
sequence_length = 10

def generate_data(X, y, sequence_length=10, step=1):
    X_local = []
    y_local = []
    for start in range(0, len(X) - sequence_length, step):
        end = start + sequence_length
        X_local.append(X[start:end])
        y_local.append(y[end - 1])
    return np.array(X_local), np.array(y_local)

X_sequence, y = generate_data(data.loc[:, "V1":"V4"].values, data.Class)
model = keras.Sequential()
model.add(LSTM(100, input_shape=(10, 4)))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy",
              metrics=[keras.metrics.binary_accuracy],
              optimizer="adam")
model.summary()
training_size = int(len(X_sequence) * 0.7)
X_train, y_train = X_sequence[:training_size], y[:training_size]
X_test, y_test = X_sequence[training_size:], y[training_size:]
from sklearn.utils import class_weight
class_weights = dict(zip(np.unique(y_train),
                         class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)))

model.fit(X_train, y_train, batch_size=64, epochs=50, class_weight=class_weights)
model.evaluate(X_test, y_test)
y_test_prob = model.predict(X_test, verbose=1)
y_test_pred = np.where(y_test_prob > 0.5, 1, 0)
from sklearn.metrics import f1_score
f1_score(y_test, y_test_pred)
I am new to TensorFlow. I created a 204x4 matrix where the first 3 columns are features and the last column is the target. How do I need to convert the array so that TensorFlow can train on the data?
TRAINING_SET = np.asarray(seq[:llength])
VALIDATION_SET = np.asarray(seq[llength:llength+tlength])
TEST_SET = np.asarray(seq[llength+tlength:])

num_epochs = 100
batch_size = 32

featureColumns = np.shape(TRAINING_SET)[1]

# define a function to get data as a batch; you can use it for test and validation as well
# by simply passing toShuffle=False, which uses tf.train.batch instead of tf.train.shuffle_batch
def data_input_fn(trainset, batch_size, num_epochs, toShuffle):
    data_f = trainset[:, :(featureColumns - 1)]
    data_l = trainset[:, (featureColumns - 1)]

    data_f_single, data_l_single = tf.train.slice_input_producer(
        [data_f, data_l], num_epochs=num_epochs, shuffle=toShuffle)

    if toShuffle is True:
        data_f_batch, data_l_batch = tf.train.shuffle_batch(
            [data_f_single, data_l_single], batch_size=batch_size,
            capacity=400, min_after_dequeue=2 * batch_size)
    else:
        # tf.train.batch has no min_after_dequeue argument
        data_f_batch, data_l_batch = tf.train.batch(
            [data_f_single, data_l_single], batch_size=batch_size, capacity=400)

    return data_f_batch, data_l_batch

def main():
    # Specify that all features have real-value data
    feature_columns = [tf.contrib.layers.real_valued_column("", dimension=3)]

    # Build 3 layer DNN with 10, 20, 10 units respectively.
    classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                                hidden_units=[10, 20, 10],
                                                n_classes=10,
                                                model_dir="/tmp/iris_model")

    # Fit model.
    classifier.fit(input_fn=lambda: data_input_fn(TRAINING_SET, batch_size, num_epochs, True), steps=4000)

    # Evaluate accuracy.
    accuracy_validation_score = classifier.evaluate(
        input_fn=lambda: data_input_fn(VALIDATION_SET, batch_size, num_epochs, False), steps=1)["accuracy"]
    accuracy_test_score = classifier.evaluate(
        input_fn=lambda: data_input_fn(TEST_SET, batch_size, num_epochs, False), steps=1)["accuracy"]

    print("\nValidation Accuracy: {0:0.2f}\nTest Accuracy: {1:0.2f}\n".format(
        accuracy_validation_score, accuracy_test_score))

    # Classify two new flower samples.
    def new_samples():
        return np.array(
            [[327, 8, 3],
             [47, 8, 0]], dtype=np.float32)

    predictions = list(classifier.predict_classes(input_fn=new_samples))
This gives:
TypeError: 'Tensor' object is not callable
You need to use a function for the input_fn, not just a tensor:
TRAINING_SET = np.asarray(seq[:llength])
VALIDATION_SET = np.asarray(seq[llength:llength+tlength])
TEST_SET = np.asarray(seq[llength+tlength:])

num_epochs = 100
batch_size = 32

# define a function to get data as a batch; you can use it for test and validation as well
# by simply passing shuffle=False and replacing tf.train.shuffle_batch with tf.train.batch
def data_input_fn(trainset, batch_size, num_epochs):
    data_f = trainset[:, :3]
    data_l = trainset[:, 3]

    data_f_single, data_l_single = tf.train.slice_input_producer(
        [data_f, data_l], num_epochs=num_epochs, shuffle=True)

    data_f_batch, data_l_batch = tf.train.shuffle_batch(
        [data_f_single, data_l_single], batch_size=batch_size,
        capacity=400, min_after_dequeue=2 * batch_size)

    return data_f_batch, data_l_batch
# use this function as input_fn to fit
classifier.fit(input_fn=lambda: data_input_fn(TRAINING_SET, batch_size, num_epochs), steps=4000)
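As a side note, the queue-based pipeline (tf.train.slice_input_producer / tf.train.shuffle_batch) is deprecated in later TensorFlow releases. A roughly equivalent tf.data version of the same input_fn might look like this (a sketch only; the data_input_fn_v2 name is just for illustration, it assumes the feature/label column layout above, and newer tf.estimator-style classifiers accept the returned Dataset directly from input_fn):

def data_input_fn_v2(trainset, batch_size, num_epochs):
    features = trainset[:, :3].astype(np.float32)
    labels = trainset[:, 3].astype(np.int32)
    ds = tf.data.Dataset.from_tensor_slices((features, labels))
    ds = ds.shuffle(buffer_size=len(trainset))      # shuffle the whole set each epoch
    ds = ds.repeat(num_epochs).batch(batch_size)    # repeat for num_epochs and batch
    return ds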
I'm trying to work with LSTMs in TensorFlow, but I've reached the point where I can't get even a simple IMDB sentiment model to converge.
I took a Keras model and tried to duplicate the exact same model in plain TensorFlow. In Keras it trains and converges, but in TensorFlow it just gets stuck at some point (0.69 loss).
I tried to make them as similar as possible; the only difference I can tell is that in Keras the padding is before the sequence, while in TensorFlow I use 'post' padding due to TensorFlow's conventions.
Any idea what's wrong with my TensorFlow model?
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.keras.python.keras.preprocessing import sequence
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.layers import Embedding
from tensorflow.contrib.keras.python.keras.layers import LSTM
from tensorflow.contrib.keras.python.keras.layers import Conv1D, MaxPooling1D
from tensorflow.contrib.keras.python.keras.datasets import imdb
import tensorflow as tf
# Embedding
max_features = 30000
maxlen = 2494
embedding_size = 128
# Convolution
kernel_size = 5
filters = 64
pool_size = 4
# LSTM
lstm_output_size = 70
# Training
batch_size = 30
epochs = 2
class TrainData:
    def __init__(self, batch_sz=batch_size):
        (x_train, y_train), (_, _) = imdb.load_data(num_words=max_features)

        y_train = [[int(x == 1), int(x != 1)] for x in y_train]

        self._batch_size = batch_sz
        self._train_data = sequence.pad_sequences(x_train, padding='pre')
        self._train_labels = y_train

    def next_batch(self):
        if len(self._train_data) < self._batch_size:
            self.__init__()
        batch_x, batch_y = self._train_data[:self._batch_size], self._train_labels[:self._batch_size]
        self._train_data = self._train_data[self._batch_size:]
        self._train_labels = self._train_labels[self._batch_size:]
        return batch_x, batch_y

    def batch_generator(self):
        while True:
            if len(self._train_data) < self._batch_size:
                self.__init__()
            batch_x, batch_y = self._train_data[:self._batch_size], self._train_labels[:self._batch_size]
            self._train_data = self._train_data[self._batch_size:]
            self._train_labels = self._train_labels[self._batch_size:]
            yield batch_x, batch_y

    def get_num_batches(self):
        return int(len(self._train_data) / self._batch_size)


def length(sequence):
    used = tf.sign(tf.abs(sequence))
    length = tf.reduce_sum(used, reduction_indices=1)
    length = tf.cast(length, tf.int32)
    return length
def get_model(x, y):
    embedding = tf.get_variable("embedding", [max_features, embedding_size], dtype=tf.float32)
    embedded_x = tf.nn.embedding_lookup(embedding, x)
    print(x)
    print(embedded_x)
    print(length(x))

    cell_1 = tf.contrib.rnn.BasicLSTMCell(lstm_output_size)
    output_1, state_1 = tf.nn.dynamic_rnn(cell_1, embedded_x, dtype=tf.float32, scope="rnn_layer1",
                                          sequence_length=length(x))

    # Select last output.
    last_index = tf.shape(output_1)[1] - 1
    # reshaping to [seq_length, batch_size, num_units]
    output = tf.transpose(output_1, [1, 0, 2])
    last = tf.gather(output, last_index)

    # Softmax layer
    with tf.name_scope('fc_layer'):
        weight = tf.get_variable(name="weights", shape=[lstm_output_size, 2])
        bias = tf.get_variable(shape=[2], name="bias")

    logits = tf.matmul(last, weight) + bias
    loss = tf.losses.softmax_cross_entropy(y, logits=logits)

    optimizer = tf.train.AdamOptimizer()
    optimize_step = optimizer.minimize(loss=loss)

    return loss, optimize_step
def tf_model():
    x_holder = tf.placeholder(tf.int32, shape=[None, maxlen])
    y_holder = tf.placeholder(tf.int32, shape=[None, 2])

    loss, opt_step = get_model(x_holder, y_holder)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        step = 0
        for epoch in range(10):
            cost_epochs = []
            train_data = TrainData()
            cost_batch = 0
            for batch in range(train_data.get_num_batches()):
                x_train, y_train = train_data.next_batch()
                _, cost_batch = sess.run([opt_step, loss],
                                         feed_dict={x_holder: x_train,
                                                    y_holder: y_train})
                cost_epochs.append(cost_batch)
                step += 1
                # if step % 100 == 0:
            print("Epoch: " + str(epoch))
            print("\tcost: " + str(np.mean(cost_epochs)))
def keras_model():
    # print('Loading data...')
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
    y_test = [[int(x == 1), int(x != 1)] for x in y_test]
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen, padding='pre')

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    data = TrainData()
    model.fit_generator(data.batch_generator(), steps_per_epoch=data.get_num_batches(),
                        epochs=epochs,
                        validation_data=(x_test, y_test))


if __name__ == '__main__':
    # keras_model()
    tf_model()
EDIT
When I limit the sequence length to 100, both models converge, so I assume there is something different in the LSTM layer.
Check the default values of your operations. In my case, the Adadelta optimizer in Keras had an initial learning rate of 1.0, while in tf.keras it had 0.001, so on the MNIST dataset it converged much more slowly.
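For example, pinning the learning rate explicitly removes that source of difference (a sketch; model stands for whichever Keras model you are compiling, and 1.0 mirrors the old standalone-Keras default mentioned above):

import tensorflow as tf

# set the learning rate explicitly instead of relying on the framework default
optimizer = tf.keras.optimizers.Adadelta(learning_rate=1.0)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])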