I am practicing building an LSTM model on a univariate series, using this dataset from Kaggle: https://www.kaggle.com/sumanthvrao/daily-climate-time-series-data
My issue is that I am unable to get an accurate prediction of the temperature, and my loss seems to be going all over the place. I have tried multiple methods, including:
Ensuring that time series data is stationary
Changing the time steps
Changing the hyperparameters
Using a stacked LSTM model
I am really curious as to what is wrong with my code, although I do have a few hypotheses:
I made an error when preprocessing the data
I introduced stationarity wrongly (a quick check is sketched after this list)
This dataset requires a multivariate approach
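To rule out the stationarity hypothesis, a minimal check (my own sketch; it assumes statsmodels is installed and uses sta_temp_df, the log-differenced series defined in the code below):
from statsmodels.tsa.stattools import adfuller

# augmented Dickey-Fuller test on the log-differenced series
adf_stat, p_value = adfuller(sta_temp_df.dropna())[:2]
print(f"ADF statistic: {adf_stat:.3f}, p-value: {p_value:.3f}")
# a p-value below 0.05 suggests the differenced series is stationary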
%tensorflow_version 2.x  # Colab-only magic; only needed (and only valid) in a notebook
import tensorflow as tf
from numpy import array
from numpy import argmax
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
# preparing independent and dependent features
def prepare_data(timeseries_data, n_features):
    X, y = [], []
    for i in range(len(timeseries_data)):
        # find the end of this pattern
        end_ix = i + n_features
        # check if we are beyond the sequence
        if end_ix > len(timeseries_data) - 1:
            break
        # gather input and output parts of the pattern
        seq_x, seq_y = timeseries_data[i:end_ix], timeseries_data[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return np.array(X), np.array(y)
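As a quick illustration of my own, prepare_data turns a series into sliding windows and their next values:
# illustrative: windows of length 3 and the value that follows each window
demo_x, demo_y = prepare_data(np.array([1, 2, 3, 4, 5]), 3)
print(demo_x)  # [[1 2 3] [2 3 4]]
print(demo_y)  # [4 5]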
# preparing the model input: the most recent window of observations
def prepare_x_input(timeseries_data, n_features):
    x = []
    for i in range(len(timeseries_data)):
        # find the end of this pattern
        end_ix = i + n_features
        # check if we are beyond the sequence
        if end_ix > len(timeseries_data):
            break
        # gather the input part of the pattern
        seq_x = timeseries_data[i:end_ix]
        x.append(seq_x)
    x = x[-1:]  # keep only the most recent window
    # remove non-stationarity
    # x = np.log(x)
    return np.array(x)
#read data and filter temperature column
df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Weather Parameter/DailyDelhiClimateTrain.csv')
df.head()
temp_df = df.pop('meantemp')
plt.plot(temp_df)
#make the data stationary with a log-difference transform
sta_temp_df = np.log(temp_df).diff()
plt.figure(figsize=(15,5))
plt.plot(sta_temp_df)
print(sta_temp_df)
time_step = 7
x, y = prepare_data(sta_temp_df, time_step)
n_features = 1
x = x.reshape((x.shape[0], x.shape[1], n_features))
model = Sequential()
model.add(LSTM(10, return_sequences=True, input_shape=(time_step, n_features)))
model.add(LSTM(10))
model.add(Dense(16, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.summary()
result = model.fit(x, y, epochs=800)
n_days = 113
pred_temp_df = list(temp_df)
test = sta_temp_df.copy()
sta_temp_df = list(sta_temp_df)
i = 0
while i < n_days:
    x_input = prepare_x_input(sta_temp_df, time_step)
    print(x_input)
    x_input = x_input.reshape((1, time_step, n_features))
    # pass the window into the model
    yhat = model.predict(x_input, verbose=0)
    print(yhat[0][0])
    sta_temp_df.append(yhat[0][0])
    i = i + 1
sta_temp_df[0] = np.log(temp_df[0])
cum_temp_df = np.exp(np.cumsum(sta_temp_df))
print(cum_temp_df)
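As a sanity check on this inverse transform (a sketch of my own), a log-diff followed by cumsum and exp should reproduce the original series exactly:
# illustrative round-trip: log-diff, then cumsum + exp recovers the series
s = np.array([10.0, 12.0, 11.5, 13.0])
d = np.concatenate(([np.log(s[0])], np.diff(np.log(s))))  # seeded log-differences
assert np.allclose(np.exp(np.cumsum(d)), s)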
My code is shown above. I would really appreciate it if someone could identify what I did wrong here!
I am trying to do hyperparameter optimization for a regression problem using a deep neural network.
I am getting somewhat high MAE values (5-7), whereas in the literature these are around 0.9-5 (using the same dataset to train the NN).
Any idea of what I could improve? I can provide the dataset if needed, but it's very large.
X is composed of 865,432 rows (features) and 134 columns (samples), where the rows are ordered by decreasing Pearson correlation with the variable Y, which is the age of each sample (a float).
Each entry in X is a number between 0 and 1.
Since there are far more features than samples, I decided to take only the 40,000 most important features (also because I don't know how to include feature selection within the training of the model); a sketch of the selection is below.
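For reference, this is roughly how I pick the top features (a sketch, not my exact code; top_k_by_correlation is a hypothetical helper and assumes X has shape (n_features, n_samples)):
import numpy as np

def top_k_by_correlation(X, Y, k=40000):
    # Pearson correlation of each feature (row of X) with the target Y
    Xc = X - X.mean(axis=1, keepdims=True)
    Yc = Y - Y.mean()
    r = (Xc @ Yc) / (np.sqrt((Xc ** 2).sum(axis=1)) * np.sqrt((Yc ** 2).sum()) + 1e-12)
    idx = np.argsort(-np.abs(r))[:k]  # indices of the k most correlated features
    return X[idx], idx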
Here is the loss vs. epochs curve from training (plot omitted):
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import os
import tensorflow as tf
from tensorflow import keras  # needed below: dynamic_model uses keras.Sequential
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from scikeras.wrappers import KerasRegressor
import keras.losses
from keras import backend as K
import keras_tuner as kt
from keras_tuner.tuners import RandomSearch
from sklearn.model_selection import cross_val_score
def dynamic_model(hp):
    # hyperparameters (independent of the number of layers)
    # number of hidden layers: 2 - 10
    hp_num_layers = hp.Int("num_layers", 2, 10)
    hp_learning_rate = hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
    # initialize the Sequential API and start building the model
    model = keras.Sequential()
    model.add(keras.layers.InputLayer(input_shape=(CpG_num,)))  # CpG_num: number of input features, defined elsewhere
    # tune the number of hidden layers and the units in each
    for i in range(1, hp_num_layers):
        # hyperparameters (dependent on num_layers):
        hp_units = hp.Int(f"units_{i}", min_value=100, max_value=400, step=100)
        hp_dropout_rate = hp.Float(f"dropout_{i}", 0, 0.5, step=0.1)
        hp_activation = hp.Choice(f"act_{i}", ['LeakyReLU', 'PReLU'])
        model.add(
            keras.layers.Dense(units=hp_units, kernel_initializer='normal', activation=hp_activation)
        )
        model.add(keras.layers.Dropout(hp_dropout_rate))
    # add the output layer
    model.add(keras.layers.Dense(units=1, kernel_initializer='normal'))
    # define optimizer, loss, and metrics
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
        loss=loss_function,
        metrics=metrics
    )
    return model
def correlation_coefficient_loss(y_true, y_pred):
    # Pearson correlation between targets and predictions, turned into a loss
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x - mx, y - my
    r_num = K.sum(tf.multiply(xm, ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den
    r = K.maximum(K.minimum(r, 1.0), -1.0)
    return 1 - K.square(r)
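As a quick sanity check of this loss (a sketch of my own): perfectly correlated vectors give r = 1, so the loss should be approximately 0.
# illustrative check: y_b = 2 * y_a, so they are perfectly correlated
y_a = tf.constant([1.0, 2.0, 3.0])
y_b = tf.constant([2.0, 4.0, 6.0])
print(correlation_coefficient_loss(y_a, y_b).numpy())  # ~0.0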
loss_function = 'mae'
metrics = ['mae', correlation_coefficient_loss]
scoring_function = 'neg_mean_absolute_error'
tuner_kind = 'random'
results_dir = '../tmp_files/Grid_Search_results/'+name+tuner_kind
hp = kt.HyperParameters()
if tuner_kind == 'random':
    tuner = kt.RandomSearch(
        dynamic_model,
        objective='mae',
        overwrite=True,
        directory=results_dir,
        project_name='random_tuner_trials',
        max_trials=500
    )
tuner.search(
    X1[name], Y1[name],
    epochs=50,
    validation_data=(X2[name], Y2[name]),
)
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
best_hps_model = dynamic_model(best_hps)
best_hps_model.fit(X1_train[name], y1_train[name],
                   validation_data=(X2_train[name], y2_train[name]),
                   epochs=500, batch_size=10)
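If it helps, Keras Tuner can also rebuild and return the best model directly via its standard API:
# alternative: let the tuner rebuild the best model itself
best_model = tuner.get_best_models(num_models=1)[0]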
import pyreadr
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
from sklearn import preprocessing
# collect the data into arrays
result = pyreadr.read_r("/home/ignat/Downloads/Telegram Desktop/TMS_coefficients.RData")
dataset = []
values = []
for i in range(694):
    dataset.append((result['tms.coef']['bs0'][i],
                    result['tms.coef']['bs'][i],
                    result['tms.coef']['bi0'][i],
                    result['tms.coef']['bi'][i],
                    result['tms.coef']['b0'][i],
                    result['tms.coef']['b1'][i],
                    result['tms.coef']['b2'][i],
                    result['tms.coef']['a0'][i],
                    result['tms.coef']['a1'][i]))
    values.append([0.0 if result['tms.coef']['Y'][i] == "НС"
                   else 1.0 if result['tms.coef']['Y'][i] == "AD"
                   else 2.0 if result['tms.coef']['Y'][i] == "DLB"
                   else 3.0])
dataset = np.array(dataset, dtype="float")
values = np.array(values, dtype="float")
print(dataset[0])
print(values[0])
(trainX, testX, trainY, testY) = train_test_split(dataset,
values, test_size=0.25, random_state=42)
# the neural network model
visible = layers.Input(shape=(9,))
drop1 = layers.Dropout(0.5, input_shape=(9,))(visible)
hidden1 = layers.Dense(32, activation="relu")(drop1)
drop2 = layers.Dropout(0.5, input_shape=(9,))(hidden1)
output = layers.Dense(1, activation="relu")(drop2)
model = tf.keras.Model(inputs=visible, outputs=output)
# compilation
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],)
# training
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=100)
model.save('my_model')
for i in range(1, 10):
    print(model.predict(dataset), values[-i])
When I call fit, I run into trouble: the loss is NaN.
dataset[i] example:
1.35684728e-01 -4.03785135e-02 -8.27514734e-02 4.21657613e-03
1.40184876e-01 1.06088863e-02 -1.31599134e-03 -1.77011366e+00
-7.19767825e-02
values[i] example:
1.
I am asking for help debugging just this fragment of code. I have googled the problem a lot, but I don't want abstract instructions; I'd like help with this specific code.
I think the main problem here might be the loss function you have chosen. Normally categorical_crossentropy is used for multiclass problems, and since you have only 1 neuron in your output layer this may lead to problems. So if you are trying to do a binary classification, you might want to switch the loss to:
# compilation
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss='binary_crossentropy',
              metrics=['accuracy'])
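Alternatively, since the labels in the question take the four values 0-3, a multiclass setup may be what is intended. A sketch (keeping the integer labels, hence sparse_categorical_crossentropy):
# multiclass alternative (sketch): 4 output units, one per class label 0-3
output = layers.Dense(4, activation="softmax")(drop2)
model = tf.keras.Model(inputs=visible, outputs=output)
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])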
I am using a TF2 (2.3.0) NN to approximate the function y which solves the ODE y' + 3y = 0.
I have defined a custom loss class and function in which I am trying to differentiate the single output with respect to the single input so that the equation holds, provided that y_true is zero:
from tensorflow.keras.losses import Loss
import tensorflow as tf
class CustomLossOde(Loss):
    def __init__(self, x, model, name='ode_loss'):
        super().__init__(name=name)
        self.x = x
        self.model = model

    def call(self, y_true, y_pred):
        with tf.GradientTape() as tape:
            tape.watch(self.x)
            y_p = self.model(self.x)
        dy_dx = tape.gradient(y_p, self.x)
        loss = tf.math.reduce_mean(tf.square(dy_dx + 3 * y_pred - y_true))
        return loss
but running the following NN:
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input
from custom_loss_ode import CustomLossOde
num_samples = 1024
x_train = 4 * (tf.random.uniform((num_samples, )) - 0.5)
y_train = tf.zeros((num_samples, ))
inputs = Input(shape=(1,))
x = Dense(16, 'tanh')(inputs)
x = Dense(8, 'tanh')(x)
x = Dense(4)(x)
y = Dense(1)(x)
model = Model(inputs=inputs, outputs=y)
loss = CustomLossOde(model.input, model)
model.compile(optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99),loss=loss)
model.run_eagerly = True
model.fit(x_train, y_train, batch_size=16, epochs=30)
For now I am getting 0 loss from the first epoch, which doesn't make any sense.
I have printed both y_true and y_pred from within the function and they seem OK, so I suspect that the problem is in the gradient, which I didn't succeed in printing.
Appreciate any help.
Defining a custom loss with the high-level Keras API is a bit difficult in this case. I would instead write the training loop from scratch, as it allows finer-grained control over what you can do.
I took inspiration from these two guides:
Advanced Automatic Differentiation
Writing a training loop from scratch
Basically, I used the fact that multiple tapes can interact seamlessly: I use one to compute the loss function and the other to calculate the gradients to be propagated by the optimizer.
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input
num_samples = 1024
x_train = 4 * (tf.random.uniform((num_samples, )) - 0.5)
y_train = tf.zeros((num_samples, ))
inputs = Input(shape=(1,))
x = Dense(16, 'tanh')(inputs)
x = Dense(8, 'tanh')(x)
x = Dense(4)(x)
y = Dense(1)(x)
model = Model(inputs=inputs, outputs=y)
# using the high level tf.data API for data handling
x_train = tf.reshape(x_train,(-1,1))
dataset = tf.data.Dataset.from_tensor_slices((x_train,y_train)).batch(1)
opt = Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99)
for step, (x, y_true) in enumerate(dataset):
    # we need to convert x to a Variable if we want the tape to be
    # able to compute the gradient with respect to x
    x_variable = tf.Variable(x)
    with tf.GradientTape() as model_tape:
        with tf.GradientTape() as loss_tape:
            loss_tape.watch(x_variable)
            y_pred = model(x_variable)
        dy_dx = loss_tape.gradient(y_pred, x_variable)
        loss = tf.math.reduce_mean(tf.square(dy_dx + 3 * y_pred - y_true))
    grad = model_tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grad, model.trainable_variables))
    if step % 20 == 0:
        print(f"Step {step}: loss={loss.numpy()}")
I'm trying to make the code piece at the end run. However, I'm getting the following error when I try to fit my model:
"ValueError: setting an array element with a sequence."
I'm trying to use an RNN to predict the next 5 days of prices. So, in the function create_ts I'm trying to create two time series: one with the first X items, and another with items X+1, X+2, X+3, X+4, and X+5, these five being the next five days of prices I'd like to predict.
I suspect the problem is here somewhere:
def create_ts(ds, series, day_gap):
    x, y = [], []
    for i in range(len(ds) - series - 1):
        item = ds[i:(i + series), 0]
        x.append(item)
        next_item = ds[i + series:(i + series + day_gap), 0]
        y.append(next_item)
    # print(type(np.array(x)), type(np.array(y)))
    return np.array(x), np.array(y).reshape(-1, 1)
series = 5
predict_days = 5
train_x, train_y = create_ts(stock_train, series, predict_days)
test_x, test_y = create_ts(stock_test, series, predict_days)
#reshape into LSTM format - samples, steps, features
train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1))
test_x = np.reshape(test_x, (test_x.shape[0], test_x.shape[1], 1))
#build model
model = Sequential()
model.add(LSTM(4,input_shape = (series, 1)))
model.add(Dense(1))
model.compile(loss='mse', optimizer = 'adam')
#fit model
model.fit(train_x, train_y, epochs = 100, batch_size = 32)
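A minimal reproduction I put together (my own sketch) suggests why the ValueError appears: near the end of the array the y windows come out shorter than day_gap, so NumPy is handed a ragged list:
import numpy as np

ds = np.arange(12, dtype=float).reshape(-1, 1)  # stand-in for the scaled prices
series, day_gap = 5, 5
y = []
for i in range(len(ds) - series - 1):
    y.append(ds[i + series:(i + series + day_gap), 0])
print([len(item) for item in y])  # [5, 5, 5, 4, 3, 2] -- the tail windows are ragged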
Thanks in advance for any help!
Below is the full code piece:
from keras import backend as k
import os
from importlib import reload
def set_keras_backend(backend):
    if k.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        reload(k)
        assert k.backend() == backend
set_keras_backend("cntk")
import numpy as np
import pandas as pd
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import math
np.random.seed(7)
#load dataset
fileloc = "C:\\Stock Data\\CL1.csv"
stock_data = pd.read_csv(fileloc)
stock_data.head()
stock_data.dtypes
stock_data['Date'] = pd.to_datetime(stock_data['Date'])
stock_data['Price'] = pd.to_numeric(stock_data['Price'], downcast = 'float')
stock_data.set_index('Date', inplace=True)
stock_close = stock_data['Price']
stock_close = stock_close.values.reshape(len(stock_close), 1)
plt.plot(stock_close)
#normalize data
scaler = MinMaxScaler(feature_range = (0,1))
stock_close = scaler.fit_transform(stock_close)
#split data into a train, test set
train_size = int(len(stock_close)*0.7)
test_size = len(stock_close) - train_size
stock_train, stock_test = stock_close[0:train_size, :], stock_close[train_size:len(stock_close), :]
#convert the data into a time series looking back over a period of days
def create_ts(ds, series, day_gap):
    x, y = [], []
    for i in range(len(ds) - series - 1):
        item = ds[i:(i + series), 0]
        x.append(item)
        next_item = ds[i + series:(i + series + day_gap), 0]
        y.append(next_item)
    # print(type(np.array(x)), type(np.array(y)))
    return np.array(x), np.array(y).reshape(-1, 1)
series = 5
predict_days = 5
train_x, train_y = create_ts(stock_train, series, predict_days)
test_x, test_y = create_ts(stock_test, series, predict_days)
#reshape into LSTM format - samples, steps, features
train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1))
test_x = np.reshape(test_x, (test_x.shape[0], test_x.shape[1], 1))
#build model
model = Sequential()
model.add(LSTM(4,input_shape = (series, 1)))
model.add(Dense(1))
model.compile(loss='mse', optimizer = 'adam')
#fit model
model.fit(train_x, train_y, epochs = 100, batch_size = 32)
I'm trying to code a multiclass output; the classes are ['A','B','C','D','E','F','G'].
Could someone elaborate on the following error message:
"ValueError: You are passing a target array of shape (79, 1) while using as loss categorical_crossentropy. categorical_crossentropy expects targets to be binary matrices (1s and 0s) of shape (samples, classes). If your targets are integer classes, you can convert them to the expected format via:
from keras.utils.np_utils import to_categorical
y_binary = to_categorical(y_int)
Alternatively, you can use the loss function sparse_categorical_crossentropy instead, which does expect integer targets."
My code:
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataa = pd.read_csv('test_out.csv')
XX = dataa.iloc[:, 0:4].values
yy = dataa.iloc[:, 4].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_Y_1 = LabelEncoder()
yy = labelencoder_Y_1.fit_transform(yy)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(XX, yy, test_size = 0.2,
random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu',
input_dim = 4))
# Adding the second hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation =
'softmax'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy',
metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, nb_epoch = 50)
# Part 3 - Making the predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
The problem lies in this portion of your code:
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_Y_1 = LabelEncoder()
yy = labelencoder_Y_1.fit_transform(yy)
You forgot to one-hot encode yy. Take note that LabelEncoder only transforms your categorical data to numerical data, i.e. [A, B, C, D, E, F, G] to [0, 1, 2, 3, 4, 5, 6]. You have to one-hot encode it since you want to use the softmax activation and categorical_crossentropy (I'm over-simplifying, but that's the gist).
So, it should have been like this:
# Encoding categorical data
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
labelencoder_Y_1 = LabelEncoder()
yy = labelencoder_Y_1.fit_transform(yy)
yy = to_categorical(yy)
I assume the target class that you are going to predict is binary, i.e. there are only 2 possible values that could occur.
If your target is binary, then the last layer of the model should be activated with the sigmoid activation function, and the model should be compiled with binary_crossentropy or sparse_categorical_crossentropy.
If the target is multi-class, i.e. more than 2 possible values, you must convert the target to categorical with the help of to_categorical from Keras. Then you should compile your model with categorical_crossentropy, and the last layer in the model should be activated with the softmax activation function.
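For concreteness, a minimal sketch of the multiclass setup described above, using the 7 classes from the question (layer sizes other than the output are illustrative):
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense

yy_cat = to_categorical(yy)  # shape: (samples, 7)

classifier = Sequential()
classifier.add(Dense(6, activation='relu', input_dim=4))
classifier.add(Dense(6, activation='relu'))
classifier.add(Dense(7, activation='softmax'))  # one unit per class
classifier.compile(optimizer='adam', loss='categorical_crossentropy',
                   metrics=['accuracy'])
# then split yy_cat into train/test and fit as in the question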