I am currently working through the TabNet tutorial with the house price data.
from sklearn.model_selection import KFold
from pytorch_tabnet.tab_model import TabNetRegressor
import numpy as np

kf = KFold(n_splits=5, random_state=42, shuffle=True)
predictions_array = []
predictions_array2 = []
CV_score_array = []
for train_index, test_index in kf.split(X):  # train_index: the training rows for this fold, test_index: the validation rows
    X_train, X_valid = X[train_index], X[test_index]
    y_train, y_valid = y[train_index], y[test_index]
    regressor = TabNetRegressor(verbose=0, seed=42)
    regressor.fit(X_train=X_train, y_train=y_train,
                  eval_set=[(X_valid, y_valid)],
                  patience=300, max_epochs=2000,
                  eval_metric=['rmse'])
    CV_score_array.append(regressor.best_cost)
    predictions_array.append(np.expm1(regressor.predict(X_test)))
predictions = np.mean(predictions_array, axis=0)
No matter how much I think about it, I don't understand why np.expm1(regressor.predict(X_test)) is used rather than regressor.predict(X_test) directly.
I referred to "https://www.kaggle.com/code/carlmcbrideellis/tabnet-a-very-simple-regression-example/notebook"
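The only explanation I can come up with is that the notebook trains on a log1p-transformed target, so expm1 would simply undo that transform at prediction time. A minimal sketch of that round trip (the prices below are made up, and the log1p transform is my assumption about the notebook, not something shown in the snippet above):
import numpy as np

# Toy example with made-up prices: if the target were transformed with np.log1p
# before training, np.expm1 on the predictions maps them back to the original scale.
sale_price = np.array([200000.0, 350000.0])
y_log = np.log1p(sale_price)       # what the model would be trained to predict
recovered = np.expm1(y_log)        # inverse transform applied to the predictions
print(np.allclose(recovered, sale_price))  # True
Is that the reason, or is something else going on?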
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(X1_train)
X1_train = tokenizer.texts_to_sequences(X1_train)
X1_val = tokenizer.texts_to_sequences(X1_val)
X1_test = tokenizer.texts_to_sequences(X1_test)
vocab_size = len(tokenizer.word_index) + 1
maxlen = 5000
X1_train = pad_sequences(X1_train, padding='post', maxlen=maxlen)
X1_val = pad_sequences(X1_val, padding='post', maxlen=maxlen)
X1_test = pad_sequences(X1_test, padding='post', maxlen=maxlen)
embeddings_dictionary = dict()
df_g = pd.read_csv('gs://----------/glove.6B.100d.txt', sep=" ", quoting=3, header=None, index_col=0)
embeddings_dictionary = {key: val.values for key, val in df_g.T.items()}
embedding_matrix = zeros((vocab_size, 100))
for word, index in tokenizer.word_index.items():
    embedding_vector = embeddings_dictionary.get(word)
    if embedding_vector is not None:
        embedding_matrix[index] = embedding_vector
input_2_col_list = [x1, x2, ..., x30]
X2_train = X_train[input_2_col_list].values
X2_val = X_val[input_2_col_list].values
X2_test = X_test[input_2_col_list].values
input_1 = Input(shape=(maxlen,))
input_2 = Input(shape=(30,))
embedding_layer = Embedding(vocab_size, 100, weights=[embedding_matrix], trainable=False)(input_1)
Bi_layer = Bidirectional(LSTM(128, return_sequences=True, dropout=0.15, recurrent_dropout=0.15))(embedding_layer) # dimension should be (None, 200, 128)
con_layer = Conv1D(64, kernel_size=3, padding='valid', kernel_initializer='glorot_uniform')(Bi_layer)
avg_pool = GlobalAveragePooling1D()(con_layer)
max_pool = GlobalMaxPooling1D()(con_layer)
dense_layer_1 = Dense(64, activation='relu')(input_2)
dense_layer_2 = Dense(64, activation='relu')(dense_layer_1)
concat_layer = Concatenate()([avg_pool,max_pool, dense_layer_2])
dense_layer_3 = Dense(50, activation='relu')(concat_layer)
output = Dense(2, activation='softmax')(dense_layer_3)
model = Model(inputs=[input_1, input_2], outputs=output)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc',f1_m,precision_m, recall_m])
print(model.summary())
history = model.fit(x=[X1_train, X2_train], y=y_train, batch_size=30, epochs=10, verbose=1, validation_data=([X1_val,X2_val],y_val))
loss, accuracy, f1_score, precision, recall = model.evaluate(x=[X1_test, X2_test], y=y_test, verbose=0)
model.save('gs://----------/Tuned_hybrid_GCP_5000_CASETYPE_8_9.tf')
##################################################
loaded_model = tf.keras.models.load_model('gs://----------/Tuned_hybrid_GCP_5000_CASETYPE_8_9.tf', custom_objects={"f1_m": f1_m, "recall_m": recall_m, "precision_m": precision_m})
loss, accuracy, f1_score, precision, recall = loaded_model.evaluate(x=[X1_test, X2_test], y=y_test, verbose=0) ###This is getting no error BUT the predictions are wrong
y_pred = loaded_model.predict(x=[X1_test, X2_test], batch_size=64, verbose=1)
y_pred_bool = np.argmax(y_pred, axis=1) ###This is getting no error BUT the predictions are wrong
##################################################################
import tensorflow_hub as hub
x=[X1_test, X2_test]
loaded_model_2 = tf.keras.Sequential([hub.KerasLayer('gs:---------------/Tuned_hybrid_GCP_100_CASETYPE_8_11_save.tf')])
loaded_model_2.build(x.shape) #### Getting an error
y_pred_2 = loaded_model_2.predict(x=[X1_test, X2_test], batch_size=64, verbose=1)
y_pred_bool_2 = np.argmax(y_pred_2, axis=1)
###################################################
#### Inside of the model folder the files and dirs are: assets/, variables/, saved_model.pb, keras_metadata.pb
#### Using 'us-docker.pkg.dev/vertex-ai/training/tf-gpu.2-8:latest' to train the model on Vertex AI
I have tried multiple saving and loading functions with custom objects, but none of them work properly.
The loaded model does run predictions, but the outputs are not accurate. I tested the same TEST data on the loaded model in a separate test script, and the predictions no longer match the ones from the original model.
A similar issue on Stack Overflow: 'https://stackoverflow.com/questions/68937973/how-can-i-fix-the-problem-of-loading-the-model-to-get-new-predictions'
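To narrow things down, this is the sanity check I plan to run next (a minimal sketch; the save path is a placeholder and it assumes model, X1_test, X2_test and the custom metric functions are already defined). If the two outputs match, the save/load round trip is fine and the discrepancy comes from the preprocessing (e.g. a re-fit Tokenizer in the other script); if they differ, the problem is in saving/loading itself:
import numpy as np
import tensorflow as tf

preds_before = model.predict([X1_test, X2_test], batch_size=64)

model.save('check_model.tf')  # placeholder path
reloaded = tf.keras.models.load_model(
    'check_model.tf',
    custom_objects={"f1_m": f1_m, "precision_m": precision_m, "recall_m": recall_m},
)
preds_after = reloaded.predict([X1_test, X2_test], batch_size=64)

# True -> the round trip preserves the outputs; False -> saving/loading changes them
print(np.allclose(preds_before, preds_after, atol=1e-5))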
I'm a newbie at machine learning (and at Stack Overflow too), and I want to ask for help.
I have two implementations of the same two-layer autoencoder for MNIST.
The first one fits well:
import tensorflow as tf, numpy as np
def in_pics(pics):
    return np.array(pics, np.float32)/255.

def out_pics(pics):
    npformed = np.array(pics, np.float32)
    samples = np.shape(npformed)[0]
    return np.reshape(npformed, (samples, 784))/255.
(train_data, test_data) = tf.keras.datasets.mnist.load_data()
train_dataset = tf.data.Dataset.from_tensor_slices((in_pics(train_data[0]), out_pics(train_data[0]))).batch(100)
test_dataset = tf.data.Dataset.from_tensor_slices((in_pics(test_data[0]), out_pics(test_data[0]))).batch(100)
model = tf.keras.Sequential(
    [
        tf.keras.layers.InputLayer(input_shape=(28, 28)),
        tf.keras.layers.Reshape(target_shape=(784,)),
        tf.keras.layers.Dense(128, activation="sigmoid",
                              kernel_initializer=tf.keras.initializers.truncated_normal(stddev=0.1),
                              bias_initializer=tf.keras.initializers.truncated_normal(stddev=0.1)),
        tf.keras.layers.Dense(784, activation="sigmoid",
                              kernel_initializer=tf.keras.initializers.truncated_normal(stddev=0.1),
                              bias_initializer=tf.keras.initializers.truncated_normal(stddev=0.1)),
    ]
)
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer="adam")
model.fit(x = train_dataset, epochs=20, verbose=1, validation_data=test_dataset)
Keras result
And I have the same model and the same training procedure, but written in (almost) pure TensorFlow:
import tensorflow as tf, numpy as np
def prep_pics(pics):
    npformed = np.array(pics, np.float32)
    samples = np.shape(npformed)[0]
    return np.reshape(npformed, (samples, 784))/255.
batch_size, num_batches = 100, 12000
(train_data, test_data) = tf.keras.datasets.mnist.load_data()
(train_pics_prepared, test_pics_prepared) = (prep_pics(train_data[0]), prep_pics(test_data[0]))
train_dataset = tf.data.Dataset.from_tensor_slices((train_pics_prepared, train_pics_prepared))
test_dataset = tf.data.Dataset.from_tensor_slices((test_pics_prepared, test_pics_prepared)).batch(batch_size)
encoder_W = tf.Variable(tf.keras.initializers.truncated_normal(stddev=0.1)(shape = [784,128], dtype = tf.float32))
encoder_b = tf.Variable(tf.keras.initializers.truncated_normal(stddev=0.1)(shape = [128],dtype = tf.float32))
decoder_W = tf.Variable(tf.keras.initializers.truncated_normal(stddev=0.1)(shape = [128,784], dtype = tf.float32))
decoder_b = tf.Variable(tf.keras.initializers.truncated_normal(stddev=0.1)(shape = [784],dtype = tf.float32))
optimizer = tf.keras.optimizers.Adam()
bin_cross = tf.keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def trainstep(X_pack, Y_pack):
    with tf.GradientTape() as tape:
        tape.watch([encoder_W, encoder_b, decoder_W, decoder_b])
        encoded = tf.nn.sigmoid(tf.matmul(X_pack, encoder_W) + encoder_b)
        decoded = tf.nn.sigmoid(tf.matmul(encoded, decoder_W) + decoder_b)
        loss = bin_cross(decoded, Y_pack)
    gradients = tape.gradient(loss, [encoder_W, encoder_b, decoder_W, decoder_b])
    optimizer.apply_gradients(zip(gradients, [encoder_W, encoder_b, decoder_W, decoder_b]))
num_samples = len(train_data[0])
epochs = num_batches//(num_samples//batch_size)
for i in range(epochs):
    print(f"Epoch num {i+1}:")
    dataset = train_dataset.shuffle(num_samples).batch(batch_size)
    for x, y in dataset:
        trainstep(x, y)
The loss never goes below 0.64 (the first implementation reached about 0.07 by the end of training).
Tensorflow result.
What I tried:
1.) Changing the optimizer to Adagrad and even to SGD.
2.) Using another loss - sigmoid_cross_entropy_with_logits.
3.) Training for longer, especially with SGD.
I haven't been able to find the mistake or typo for almost a week. Please help!
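For reference, here is a small sanity check on the loss itself that I also looked at (just a toy batch, separate from the training code above; Keras losses are documented to be called as loss(y_true, y_pred), and with from_logits=True they expect raw logits rather than sigmoid outputs):
import tensorflow as tf

# Toy batch: check how sensitive BinaryCrossentropy is to the argument order.
bce = tf.keras.losses.BinaryCrossentropy(from_logits=False)
y_true = tf.constant([[0.0, 1.0, 1.0]])
y_pred = tf.constant([[0.1, 0.8, 0.6]])
print(float(bce(y_true, y_pred)))  # documented order: loss(y_true, y_pred)
print(float(bce(y_pred, y_true)))  # swapped order gives a different value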
I am trying to use the Functional API in TensorFlow (https://keras.io/guides/functional_api/) to build a deep learning model. So, this is my model:
first_inputs = Input(shape=(100, ))
first_dense = Dense(1, )(first_inputs)
second_input = Input(shape=(1, ))
merge = concatenate([first_dense, second_input])
output = Dense(1, )(merge)
model = Model(inputs=[first_inputs, second_input], outputs=output)
model.compile(optimizer=ada_grad, loss='binary_crossentropy',
              metrics=['accuracy'])
I use train_test_split as you see:
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.01, random_state=42)
How can I use model.fit here and tell it which columns of x_train correspond to first_inputs and second_input? And how can I do the same with model.evaluate on x_test?
You cannot specify that by column name. Multiple inputs should be passed to fit as a list of arrays, e.g.:
X = np.random.randn(1234, 101)
X1, X2 = X[:,:100], X[:, 100]
Y = np.random.randn(1234, 1)
model.fit([X1, X2], Y)
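Applied to your split (assuming x has 101 columns, with the 100 features for first_inputs first and the single second_input column last; adjust the slicing to your actual layout, and the epochs/batch size below are arbitrary):
# Slice the feature matrix into the two inputs the model expects.
x1_train, x2_train = x_train[:, :100], x_train[:, 100:]
x1_test,  x2_test  = x_test[:, :100],  x_test[:, 100:]

model.fit([x1_train, x2_train], y_train, epochs=10, batch_size=32)
model.evaluate([x1_test, x2_test], y_test)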
I followed the guide found here (regression):
https://stackabuse.com/tensorflow-2-0-solving-classification-and-regression-problems/
using this dataset:
https://drive.google.com/file/d/1mVmGNx6cbfvRHC_DvF12ZL3wGLSHD9f_/view
and ended up with this code:
data = pd.read_csv(r'path')
X = data.iloc[:, 0:4].values
y = data.iloc[:, 4].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
input_layer = Input(shape=(X.shape[1],))
dense_layer_1 = Dense(100, activation='relu')(input_layer)
dense_layer_2 = Dense(50, activation='relu')(dense_layer_1)
dense_layer_3 = Dense(25, activation='relu')(dense_layer_2)
output = Dense(1)(dense_layer_3)
model = Model(inputs=input_layer, outputs=output)
model.compile(loss="mean_squared_error" , optimizer="adam", metrics=["mean_squared_error"])
history = model.fit(X_train, y_train, batch_size=2, epochs=100, verbose=1, validation_split=0.2)
from sklearn.metrics import mean_squared_error
from math import sqrt
pred_train = model.predict(X_train)
print(np.sqrt(mean_squared_error(y_train,pred_train)))
pred = model.predict(X_test)
print(np.sqrt(mean_squared_error(y_test,pred)))
Everything works and the model gets trained, but how do I actually use it? I want to input 4 values and get a prediction back. So, for example, take the array [9, 4554, 1950, 0.634] and get the predicted value. No matter what I do, the model won't accept the data I am using.
Thanks for the help!
The main problem you are facing, as I understand it, is the dimensionality: you are passing [9, ..., 0.634], which has shape (4,), i.e. it is 1D, whereas model.predict expects 2D input just like X_train and X_test, so you have to convert the 1D array to 2D.
How to convert it:
import numpy as np
X_test = [9, 4554, 1950, 0.634]
X_test = np.array(X_test)
X_test = X_test.reshape(1, 4)
model.predict(X_test)
I used LinearRegressor from the TensorFlow Estimator library (tf.estimator.LinearRegressor) for training, evaluation, and then prediction. However, the evaluation always showed a very large loss value, so the predictions were totally inaccurate.
Here is how I defined the train, evaluation, and prediction input functions and the LinearRegressor:
def train_input_fn(x, y):
    training_input_fn = tf.estimator.inputs.pandas_input_fn(
        x=x,
        y=y,
        batch_size=32,
        shuffle=True,
        num_epochs=None
    )
    return training_input_fn

def eval_input_fn(x, y):
    eval_input_fn = tf.estimator.inputs.pandas_input_fn(
        x=x,
        y=y,
        batch_size=32,
        shuffle=False,
        num_epochs=1
    )
    return eval_input_fn

def predict_input_fn(x):
    predict_input_fn = tf.estimator.inputs.pandas_input_fn(
        x=x,
        shuffle=False,
        num_epochs=1
    )
    return predict_input_fn

def get_linear_regressor():
    properties = load_data()
    del properties['_id']
    X_train, X_test, y_train, y_test = split_data(properties)
    linear_regressor = tf.estimator.LinearRegressor(feature_columns=build_features(),
                                                    model_dir="linear_regressor")
    linear_regressor.train(input_fn=train_input_fn(X_train, y_train), steps=5000)
    loss = linear_regressor.evaluate(input_fn=eval_input_fn(X_test, y_test))
    print("Loss is: " + str(loss))
    return linear_regressor
Result:
Loss is: {'average_loss': 417497550000.0, 'label/mean': 751504.7, 'loss': 13186813000000.0, 'prediction/mean': 331845.62, 'global_step': 145000}
I had 6472 data points, split between training and evaluation with an 8:2 ratio.
What have I done incorrectly? How can I improve the accuracy of prediction?
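For scale (assuming average_loss is the per-example mean squared error, which I believe is what LinearRegressor reports by default), the numbers above already correspond to an RMSE close to the label mean itself:
import math

average_loss = 417497550000.0  # from the evaluation output above, assumed to be MSE per example
label_mean = 751504.7

rmse = math.sqrt(average_loss)
print(rmse)               # ~646,000
print(rmse / label_mean)  # ~0.86, i.e. the typical error is almost as large as the mean label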