Hyperopt deterministic model Keras with seed - tensorflow

I am trying to use hyperopt for a classification deep learning model with keras:
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(1)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(2)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(2)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
#importing libraries
import ...
from hyperas import optim
from hyperas.distributions import choice, uniform, randint
from hyperopt import Trials, STATUS_OK, tpe
def data():
    return x_train_sequence, y_train, x_test_sequence, y_test
# ===============
# Model creation
# ===============
def create_model(x_train_sequence, y_train, x_test_sequence, y_test):
    embedding_dim = {{choice([...])}}
    lstm = {{choice([...])}}
    num_epochs = {{choice([...])}}
    dropout = {{uniform(0, 1)}}
    batch_size = {{choice([32])}}
    model = Sequential()
    model.add(...)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=["binary_accuracy"])
    # Fit the model and evaluate
    result = model.fit(x_train_sequence, y_train,
                       batch_size=batch_size,
                       validation_data=(x_test_sequence, y_test), verbose=verbose, shuffle=True, epochs=num_epochs)
    # validation_acc is presumably taken from the fit history (this line is missing from the snippet):
    validation_acc = np.amax(result.history['val_binary_accuracy'])
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
# ===============
# Apply model and find best run
# ===============
if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          rseed=np.random.seed(1),
                                          max_evals=50,
                                          trials=Trials())
    X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
Even though I thought I set all the necessary seeds to obtain reproducible results, I keep getting different results, even when I substitute the {{choice([...])}} placeholders with fixed integers.
What am I missing? What should I add to seed the model properly?
Thanks so much in advance!
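One detail worth flagging, offered only as a hedged observation: np.random.seed(1) returns None, so rseed=np.random.seed(1) actually passes no seed to hyperas; passing the integer itself is presumably what was intended. A minimal sketch (the rseed keyword is taken from the question itself):
# np.random.seed(1) seeds NumPy as a side effect but returns None,
# so hand hyperas the integer directly:
best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      rseed=1,  # plain integer seed
                                      max_evals=50,
                                      trials=Trials())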

Related

Getting Value Error while creating RNN model

I'm getting this error: ValueError: logits and labels must have the same shape, received ((32, 1) vs (32, 23740)).
This is my full code:
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
import re
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import pad_sequences
# All data processing stuff
data = pd.read_csv('test.csv') # Load the data
# convert Sentiment type to string
data['Sentiment'] = data['Sentiment'].astype(str)
# remove special characters and convert to lowercase
data["Tweet"] = data["Tweet"].apply(lambda x: x.lower()) # Convert all tweets to lowercase
data["Tweet"] = data["Tweet"].apply(lambda x: x.replace("[^a-zA-Z0-9]", "")) # Remove special characters
data["Sentiment"] = data["Sentiment"].apply(lambda x: x.lower())
# Initialize the Tokenizer
tokenizer = Tokenizer()
# Fit the Tokenizer on the text data
tokenizer.fit_on_texts(data["Tweet"] + data["Sentiment"])
token_sequences1 = tokenizer.texts_to_matrix(data["Tweet"])
token_sequences2 = tokenizer.texts_to_matrix(data["Sentiment"])
padded_sequences1 = pad_sequences(token_sequences1)
padded_sequences2 = pad_sequences(token_sequences2)
# split the data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(
padded_sequences1, padded_sequences2, test_size=0.2, random_state=42)
# create the RNN model
model = Sequential()
# 1000 is the number of words in the vocabulary, 128 is the dimension of the embedding vector,
model.add(Embedding(1000, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
# compile the model with a specified loss function and optimizer
# binary_crossentropy is used for binary classification problems like this one (positive or negative sentiment)
# accuracy is the metric used to evaluate the model performance (the percentage of correct predictions)
# the loss function and the optimizer can be changed to see if the model performance improves or not
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# train the model on the training data and put verbose=1 to see the training progress
model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1)
# evaluate the model on the testing data
y_pred = model.predict(x_test)
# calculate the evaluation metrics
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
# print the evaluation metrics
print("Accuracy: {:.2f}".format(accuracy))
print("Precision: {:.2f}".format(precision))
print("Recall: {:.2f}".format(recall))
print("F1 Score: {:.2f}".format(f1))
I'm actually trying to create a sentiment analyser for news-headline tweets from this data; here is a sample image of how my data looks in the CSV.
Please provide a solution to this; I have tried every solution I could find.
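For context, the shape mismatch comes from the labels: texts_to_matrix turns each sentiment string into a vocabulary-sized vector (hence the 23740 columns), while Dense(1, activation='sigmoid') outputs one value per sample. A minimal sketch of one way to make the shapes line up, assuming the sentiments are plain class names (using LabelEncoder is my own suggestion, not something from the original post):
from sklearn.preprocessing import LabelEncoder

# Encode the sentiment strings as integer class ids instead of tokenized matrices.
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(data["Sentiment"])  # e.g. negative -> 0, positive -> 1

# With two classes, y fits a Dense(1, activation='sigmoid') head trained with
# binary_crossentropy; with more classes, one-hot encode y and switch to
# Dense(n_classes, activation='softmax') with categorical_crossentropy.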

How to find class labels from a keras model

I am predicting classes, but there is something I don't get. In the simplified example below, I train a model to predict MNIST handwritten digits. My test set has an accuracy of 95% when I use
model.evaluate(test_image, test_label)
However, when I use
model.predict(test_image)
and then extract the predicted labels using np.argmax(), the accuracy drops. When I run all the code repeatedly, this accuracy changes a lot.
I now suspect that the classes in the model are not ordered 0, 1, ..., 9. Is there a way to see the class labels of a model? Or did I make another mistake?
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
import numpy as np
# Load data
(train_image, train_label), (test_image, test_label) = load_data()
# Train
model = Sequential([
Flatten(input_shape=(28,28)),
Dense(100, activation="relu"),
Dense(100, activation="relu"),
Dense(10, activation="sigmoid")
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics='accuracy')
history = model.fit(train_image, train_label,
batch_size=32, epochs=50,
validation_data=(test_image, test_label),
verbose = 0)
eval = model.evaluate(test_image, test_label)
print('Accuracy (auto):', eval[1]) # This is always high
# Predict and evaluate manually
predictions = model.predict(test_image)
pred = np.array([np.argmax(pred) for pred in predictions])
true = test_label
print('Accuracy (manually):', np.mean(pred == true)) # This varies a lot
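On the sub-question about class labels: with integer targets and sparse_categorical_crossentropy, output unit i corresponds to label i, and a plain Keras model does not store a separate class mapping. A small sketch of a vectorized check, reusing the question's variables (the confusion-matrix step is my own addition):
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

# Column i of the prediction matrix corresponds to digit i.
pred_labels = np.argmax(model.predict(test_image), axis=1)

print("Accuracy:", accuracy_score(test_label, pred_labels))
# The confusion matrix reveals whether any systematic label reordering is happening
# (rows and columns are both indexed 0..9 in the same order).
print(confusion_matrix(test_label, pred_labels))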

AttributeError: Can't get attribute 'create_model' on <module '__main__'>

I have created a neural network model and an ensemble learning model (a voting classifier) that combines the neural network with a random forest and XGBoost. I saved the model and tried to load it in another Jupyter notebook, but I get this error: AttributeError: Can't get attribute 'create_model' on <module '__main__'>.
Here is the code that creates the models in the first notebook:
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import numpy
# Function to create model, required for KerasClassifier
def create_model(input_shape=66):
    #x_shape= data_x.shape
    #input_dim=x_shape[1]
    # create model
    model = Sequential()
    model.add(Dense(12, input_dim=66, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
seed = 7
numpy.random.seed(seed)
Kc_model = KerasClassifier(
create_model, # Pass in function
input_shape=66, # Pass in the dimensions to above function
epochs=100,
batch_size=32,
verbose=False)
Kc_model._estimator_type = "classifier"
Kc_model.fit(x_train, y_train, epochs=100,batch_size=10)
rf = RandomForestClassifier(max_depth=15, random_state=0)
rf.fit(x_train,y_train)
rf_y_pred = rf.predict(x_test)
#Model Score
print("The accuracy score for Random Forest Classifier is")
print("Accuracy:{}%".format(round(metrics.accuracy_score(y_test, rf_y_pred)*100)))
print("Training:{}%".format(round(rf.score(x_train, y_train)*100)))
print("Test set: {}%".format(round(rf.score(x_test, y_test)*100)))
xgboost_model = XGBClassifier()
xgboost_model.fit(x_train, y_train)
xgboost_y_pred = xgboost_model.predict(x_test)
print("The accuracy score for Voting XGB Classifier is")
print("Accuracy:{}%".format(round(metrics.accuracy_score(y_test, xgboost_y_pred)*100)))
print("Training:{}%".format(round(xgboost_model.score(x_train, y_train)*100)))
print("Test set: {}%".format(round(xgboost_model.score(x_test, y_test)*100)))
from keras.wrappers.scikit_learn import KerasClassifier
import scikeras
from tensorflow import keras
voting = VotingClassifier(
estimators = [('rf',rf),('xgboost_model',xgboost_model),('Kc_model',Kc_model) ],
voting='soft')
#reshaping=y_test.reshape(2712,1)
voting_model =voting.fit(x_train, y_train)
voting_pred = voting_model.predict(x_test)
#Model Score
print("The accuracy score for Voting Classifier is")
print("Training:{}%".format(round(voting_model.score(x_train, y_train)*100)))
print("Test set: {}%".format(round(voting_model.score(x_test, y_test)*100)))
import pickle
# save
with open('voting_model.pkl','wb') as f:
    pickle.dump(Kc_model, f)
In the second notebook, where I try to load the model, I get the error mentioned above:
import pickle
import pandas as pd
with open('voting_model.pkl', 'rb') as f:
    Kc_model = pickle.load(f)
The reason this happens is that the keras.wrappers.scikit_learn.KerasClassifier wrapper does not survive pickling on its own: the model-building function is stored only by reference and is not saved with it. Instead, you should pickle the fitted Keras model:
import pickle
# save
with open('voting_model.pkl','wb') as f:
    pickle.dump(Kc_model.model, f)
Now, you can load your model and use it as you wish.
with open('voting_model.pkl', 'rb') as f:
    model = pickle.load(f)
# Predict something.
model.predict(X_test)
However, if you need a KerasClassifier instance after loading, you should re-wrap it. You also need to save the classes_ attribute. Finally, the build function now simply returns the loaded pickle:
# Save this as well.
with open('voting_model_classes.pkl', 'wb') as f:
    pickle.dump(Kc_model.classes_, f)
import pickle
from keras.wrappers.scikit_learn import KerasClassifier
def load_model():
    with open('voting_model.pkl', 'rb') as f:
        return pickle.load(f)

def load_classes():
    with open('voting_model_classes.pkl', 'rb') as f:
        return pickle.load(f)
Kc_model = KerasClassifier(
load_model,
epochs=100,
batch_size=32,
verbose=False)
Kc_model._estimator_type = "classifier"
# Call load_model() manually here; otherwise it would only be invoked when the classifier is re-fitted.
Kc_model.model = load_model()
Kc_model.classes_ = load_classes()
# Now you can use Kc_model as KerasClassifier.
The error is expected: the model building function gets pickled by name, and that name doesn't exist in your new notebook.
You could try SciKeras which has an initialize method (docs) which you can call to restore stuff like classes_ if you choose to serialize your Keras model using SavedModel directly (SciKeras's KerasClassifier will gladly accept a model instance).
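For completeness, a minimal sketch of that SciKeras route, assuming the trained Keras model was saved separately with model.save('kc_model') (the path and variable names here are illustrative, not from the original post):
import tensorflow as tf
from scikeras.wrappers import KerasClassifier

# Load the underlying Keras model saved earlier with model.save("kc_model").
keras_model = tf.keras.models.load_model("kc_model")

# SciKeras accepts a Model instance directly instead of a build function.
clf = KerasClassifier(model=keras_model, epochs=100, batch_size=32, verbose=False)

# initialize() restores attributes such as classes_ without re-fitting,
# so the wrapper can be used for prediction (or inside a VotingClassifier).
clf.initialize(x_train, y_train)
preds = clf.predict(x_test)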

Why is Google colab TPU slow?

I'm using Talos to run hyperparameter tuning of a Keras model. Running this short code on a Google Colab TPU is very slow. I think it has something to do with the type of data. Should I convert it to tensors to make the TPU faster?
%tensorflow_version 2.x
import os
import tensorflow as tf
import talos as ta
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
def iris_model(x_train, y_train, x_val, y_val, params):
    # Specify a distributed strategy to use TPU
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
    tf.config.experimental_connect_to_host(resolver.master())
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)
    # Use the strategy to create and compile a Keras model
    with strategy.scope():
        model = Sequential()
        model.add(Dense(32, input_shape=(4,), activation=tf.nn.relu, name="relu"))
        model.add(Dense(3, activation=tf.nn.softmax, name="softmax"))
        model.compile(optimizer=Adam(learning_rate=0.1), loss=params['losses'])
    # Convert data type to use TPU
    x_train = x_train.astype('float32')
    x_val = x_val.astype('float32')
    # Fit the Keras model on the dataset
    out = model.fit(x_train, y_train, batch_size=params['batch_size'], epochs=params['epochs'], validation_data=[x_val, y_val], verbose=0, steps_per_epoch=0)
    return out, model
# Load dataset
X, y = ta.templates.datasets.iris()
# Train and test set
x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.30, shuffle=False)
# Create a hyperparameter distributions
p = {'losses': ['logcosh'], 'batch_size': [128, 256, 384, 512, 1024], 'epochs': [10, 20]}
# Use Talos to scan the best hyperparameters of the Keras model
scan_object = ta.Scan(x_train, y_train, params=p, model=iris_model, experiment_name='test', x_val=x_val, y_val=y_val, fraction_limit=0.5)
Thank you for your question.
Unfortunately, I was not able to get your code sample to run on TensorFlow 2.2, so I don't know what performance you were seeing originally. I was able to fix it up and get it running on TPUs with the following changes:
Replace tf.config.experimental_connect_to_host(resolver.master()) with tf.config.experimental_connect_to_cluster(resolver)
Move TPU initialization outside of iris_model().
Use tf.data.Dataset for TPU input.
Here's the modified Colab code:
# Run this to install Talos before running the rest of the code.
!pip install git+https://github.com/autonomio/talos#1.0
%tensorflow_version 2.x
import os
import tensorflow as tf
import talos as ta
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
print(tf.__version__) # TF 2.2.0 in my case
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
def iris_model(x_train, y_train, x_val, y_val, params):
    # Use the strategy to create and compile a Keras model
    strategy = tf.distribute.experimental.TPUStrategy(resolver)
    with strategy.scope():
        model = Sequential()
        model.add(Dense(32, input_shape=(4,), activation=tf.nn.relu, name="relu"))
        model.add(Dense(3, activation=tf.nn.softmax, name="softmax"))
        model.compile(optimizer=Adam(learning_rate=0.1), loss=params['losses'])
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(params['batch_size'])
    val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(params['batch_size'])
    # Fit the Keras model on the dataset
    out = model.fit(train_dataset, epochs=params['epochs'], validation_data=val_dataset)
    return out, model
# Load dataset
X, y = ta.templates.datasets.iris()
# Train and test set
x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.30, shuffle=False)
# Create a hyperparameter distributions
p = {'losses': ['logcosh'], 'batch_size': [128, 256, 384, 512, 1024], 'epochs': [10, 20]}
# Use Talos to scan the best hyperparameters of the Keras model
scan_object = ta.Scan(x_train, y_train, params=p, model=iris_model, experiment_name='test', x_val=x_val, y_val=y_val, fraction_limit=0.5)
For me, the last call took a little less than 2 minutes.
For well-known datasets, you can skip the step of creating your own tf.data.Dataset by using the TensorFlow Datasets library. TFDS does have the iris dataset in their library. For an end-to-end example of using TFDS with TPUs, see TensorFlow's official TPU guide.
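As a hedged illustration of that last point, loading iris through TensorFlow Datasets might look roughly like this (the feature/label handling reflects my reading of the TFDS iris entry and should be checked against the TFDS catalog):
import tensorflow as tf
import tensorflow_datasets as tfds

# Load the iris dataset from the TFDS catalog as (features, label) pairs.
ds = tfds.load('iris', split='train', as_supervised=True)

# Batch it for TPU-friendly input, mirroring the tf.data pipeline used above.
train_dataset = ds.map(lambda x, y: (tf.cast(x, tf.float32), y)).batch(128)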

Tensorflow-Keras reproducibility problem on Google Colab

I have a simple code to run on Google Colab (I use CPU mode):
import numpy as np
import pandas as pd
## LOAD DATASET
datatrain = pd.read_csv("gdrive/My Drive/iris_train.csv").values
xtrain = datatrain[:,:-1]
ytrain = datatrain[:,-1]
datatest = pd.read_csv("gdrive/My Drive/iris_test.csv").values
xtest = datatest[:,:-1]
ytest = datatest[:,-1]
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.utils import to_categorical
## SET ALL SEED
import os
os.environ['PYTHONHASHSEED']=str(66)
import random
random.seed(66)
np.random.seed(66)
tf.set_random_seed(66)
from tensorflow.keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
## MAIN PROGRAM
ycat = to_categorical(ytrain)
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(xtrain, ycat, epochs=15, batch_size=32)
#get prediction
classes = model.predict_classes(xtest)
#get accuration
accuration = np.sum(classes == ytest)/len(ytest) * 100
I have read the setup for creating reproducible code here: Reproducible results using Keras with TensorFlow backend, and I put all the code in the same cell. But the result (e.g. the loss) is different every time I run that cell (using Shift+Enter).
In my case, the result from the code above can only be reproduced if:
I run it using "Runtime" > "Restart and run all", or
I put the code in a single file and run it from the command line (python3 file.py).
Is there something I am missing to make the result reproducible without restarting the runtime?
You should also fix the seed for kernel_initializer in your Dense layers. So, your model will be like:
model = tf.keras.Sequential()
model.add(Dense(10, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66), input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66)))
model.add(Activation("softmax"))
I tried most of the solutions on the web, and only the following code worked for me:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# Enable deterministic GPU ops (from "TensorFlow Determinism"); this flag expects "1", not the seed value
os.environ["TF_DETERMINISTIC_OPS"] = "1"
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
Note that you should call this code before every run (at least that was necessary in my case).
If you want to run your code on CPU:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# Hide GPUs so everything runs on the CPU
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
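Since this has to be re-run before every execution, a small helper that bundles the same calls can be convenient; this is just a sketch of the code above wrapped in a function (the name set_seeds is my own, and note that PYTHONHASHSEED only affects hashing if it is set before the Python process starts):
import os
import random
import numpy as np
import tensorflow as tf

def set_seeds(seed=0):
    # Same seeding calls as above, bundled so they can be re-run before each experiment.
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)

set_seeds(0)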
I've tried to get TensorFlow 2.0 working reproducibly using Keras and Google Colab (CPU), with a version of the Iris dataset processing similar to that described above by @malioboro. This seems to work - might be useful:
# Install TensorFlow
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
# Setup repro section from Keras FAQ with TF1 to TF2 adjustments
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.compat.v1.set_random_seed(1234)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# Rest of code follows ...
# Some adopted from: https://janakiev.com/notebooks/keras-iris/
# Some adopted from the question.
#
# Load Data
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
iris = load_iris()
X = iris['data']
y = iris['target']
names = iris['target_names']
feature_names = iris['feature_names']
# One hot encoding
enc = OneHotEncoder()
Y = enc.fit_transform(y[:, np.newaxis]).toarray()
# Scale data to have mean 0 and variance 1
# which is important for convergence of the neural network
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data set into training and testing
X_train, X_test, Y_train, Y_test = train_test_split(
X_scaled, Y, test_size=0.5, random_state=2)
n_features = X.shape[1]
n_classes = Y.shape[1]
## MAIN PROGRAM
from tensorflow.keras.layers import Dense, Activation
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(X_train, Y_train, epochs=20, batch_size=32)
#get prediction
classes = model.predict_classes(X_test)
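One caveat to add as a hedge: Sequential.predict_classes was removed in later TensorFlow releases (around 2.6), so on current versions the equivalent is to take the argmax of predict yourself:
import numpy as np

# Equivalent of model.predict_classes(X_test) on TF versions where it was removed.
classes = np.argmax(model.predict(X_test), axis=1)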