Best method for defining layers using the tf.keras API? - tensorflow

The tensorflow.keras API is not working when creating the layers reference. Is there any other method of creating a layers reference?
Code:
layer=keras.layers
Error message: NameError: name 'leyer' is not defined
The full code is pasted here:
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
import numpy as np
# setting the seed value
seed=7
np.random.seed(seed)
#setting up the dataset for training
dataframe=pd.read_csv("../datasets/iris.csv",header=None)
data=dataframe.values
input_x = data[:,0:4]
true_y = data[:,4]
#Encoding the true_y data to one hot encoding
le=LabelEncoder()
le.fit(true_y)
y_encoded = le.transform(true_y)
y_encoded = keras.utils.to_categorical(y_encoded,num_classes=3)
# creating the model
def base_fun():
    layer=keras.layers
    model = keras.models.Sequential()
    model.add(layer.Dense(4,input_dim=4,kernel_initializer='normal',activation='relu'))
    model.add(leyer.Dnese(3, kernel_initializer='normal', activation='relu'))
estimator=keras.wrappers.scikit_learn.KerasClassifier(build_fn=base_fun,epochs=20,batch_size=10)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
result = cross_val_score(estimator, input_x, y_encoded,cv=kfold)
print("Accuracy : %.2%% (%.2%%)" %(result.mean()*100, result.std()*100))

Well, this line:
model.add(leyer.Dnese(3, kernel_initializer='normal', activation='relu'))
has two typos: leyer should be layer and Dnese should be Dense, like so:
model.add(layer.Dense(3, kernel_initializer='normal', activation='relu'))
Based on your comment, this line also causes an error:
estimator = keras.wrappers.scikit_learn.KerasClassifier( build_fn = base_fun, epochs = 20, batch_size = 10 )
From the Keras Scikit documentation:
build_fn should construct, compile and return a Keras model, which will then be used to fit/predict.
But your function base_fun() does not return anything. Append this line at the end of base_fun():
return model
As per your comment, the last print line could be changed to this (I don't know the % formatting well; I generally use the syntax below):
print( "Accuracy : {:.2%} ({:.2%})".format( result.mean(), result.std() ) )

Related

'Loss not found' error when compiling model

I am making a program for classifying hand-written digits, but building the model gives me the following error:
ValueError: No loss found. You may have forgotten to provide a `loss` argument in
the `compile()` method
The code for the model is as follows:
# making the model
model = tf.keras.models.Sequential()
model.add(tf.keras.Input(28*28))
model.add(tf.keras.layers.Dense(128, activation = 'relu'))
model.add(tf.keras.layers.Dense(32, activation = 'relu'))
model.add(tf.keras.layers.Dense(10, activation = 'softmax'))
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)
print(model.summary())
I have tried other losses, but the same error persists.
The libraries that I used are as follows:
# importing important libraries
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

Load HDF5 checkpoint models with custom metrics

I've created a Keras Regressor to run a RandomizedSearchCV using a ModelCheckpoint callback, but the training overran the 12-hour Colab runtime limit and stopped halfway through. The models are saved in HDF5 format.
I used tensorflow_addons to add the RSquare class to monitor the R2 for train and validation sets. However, when I used keras.models.load_model, I get the following error:
As you can see from the traceback, I have passed the custom_objects parameter, but still it is not recognised.
How can I solve this?
You can see the full code example below:
import os
import datetime
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import Input, InputLayer
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import RandomizedSearchCV
def build_model(n_hidden = 2, n_neurons = 64, input_shape = (X_train.shape[1],), dropout = 0):
    model = Sequential()
    model.add(InputLayer(input_shape = input_shape))
    for i in range(n_hidden):
        model.add(Dense(n_neurons, activation = 'relu'))
        model.add(Dropout(dropout))
    model.add(Dense(1))
    model.compile(loss = 'mean_squared_error', optimizer = 'adam', metrics = [tfa.metrics.RSquare(y_shape=(1,))])
    return model
keras_reg = KerasRegressor(build_model)
checkdir = os.path.join(r'/content/drive/MyDrive/COP328/Case A', 'checkpoints', datetime.datetime.now().strftime('%d-%m-%Y_%H-%M-%S'), 'imputed_log1p-{epoch:02d}-{val_r_square:.3f}.hdf5')
callbacks = [ModelCheckpoint(checkdir, save_freq='epoch', save_best_only = True, monitor = 'val_r_square', mode = 'max'),
             EarlyStopping(patience = 10)]
# Here is where training got interrupted because of Colab runtime being dropped:
param_dist = {
    'n_hidden' : [1,2],
    'n_neurons': [8,16,32,64,128],
    'dropout': [0,0.2,0.4]
}
rnd_search_cv = RandomizedSearchCV(keras_reg, param_dist, n_iter= 15, cv = 5)
rnd_search_cv.fit(X_train, y_train, epochs = 200, batch_size = 64,
                  validation_data = (X_valid,y_valid),
                  callbacks = callbacks)
# Here is where I am trying to reload one of the most promising models based on R2, and getting the error:
from keras.models import load_model
import tensorflow_addons as tfa
rnd_model = load_model(r'/content/drive/MyDrive/COP328/Case A/checkpoints/26-06-2021_17-32-29/imputed_log1p-56-1.000.hdf5', custom_objects = {'r_square': tfa.metrics.RSquare, 'val_r_square': tfa.metrics.RSquare})
This solution doesn't take the addons package into account, but one possibility is to create the coefficient of determination (R^2) as a custom metric without that package, and then define it as your loss.
def coeff_determination(y_true, y_pred):
    from keras import backend as K
    SS_res = K.sum(K.square( y_true-y_pred ))
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
    return ( 1 - SS_res/(SS_tot + K.epsilon()) )
Recall that minimizing MSE will maximize R^2.
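As a minimal sketch of how that custom metric would fit in: compile with coeff_determination in place of tfa.metrics.RSquare, and pass the function under its own name when reloading a checkpoint (the filename below is just a placeholder):
# compile with the plain-Keras metric instead of tfa.metrics.RSquare
model.compile(loss = 'mean_squared_error', optimizer = 'adam', metrics = [coeff_determination])
# reload a checkpoint, mapping the saved metric name back to the function
from keras.models import load_model
rnd_model = load_model('checkpoint.hdf5',
                       custom_objects = {'coeff_determination': coeff_determination})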

Tensorflow-Keras reproducibility problem on Google Colab

I have a simple piece of code to run on Google Colab (I use CPU mode):
import numpy as np
import pandas as pd
## LOAD DATASET
datatrain = pd.read_csv("gdrive/My Drive/iris_train.csv").values
xtrain = datatrain[:,:-1]
ytrain = datatrain[:,-1]
datatest = pd.read_csv("gdrive/My Drive/iris_test.csv").values
xtest = datatest[:,:-1]
ytest = datatest[:,-1]
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.utils import to_categorical
## SET ALL SEED
import os
os.environ['PYTHONHASHSEED']=str(66)
import random
random.seed(66)
np.random.seed(66)
tf.set_random_seed(66)
from tensorflow.keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
## MAIN PROGRAM
ycat = to_categorical(ytrain)
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(xtrain, ycat, epochs=15, batch_size=32)
#get prediction
classes = model.predict_classes(xtest)
#get accuration
accuration = np.sum(classes == ytest)/len(ytest) * 100
I have read the setup for creating reproducible code here: Reproducible results using Keras with TensorFlow backend, and I put all the code in the same cell. But the result (e.g. the loss) is different every time I run that cell (running the cell with Shift+Enter).
In my case, the result from the code above can only be reproduced if:
I run it using "Runtime" > "Restart and run all", or
I put the code in a single file and run it from the command line (python3 file.py).
Is there something I am missing to make the result reproducible without restarting the runtime?
You should also fix the seed for the kernel_initializer in your Dense layers. So your model will look like:
model = tf.keras.Sequential()
model.add(Dense(10, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66), input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66)))
model.add(Activation("softmax"))
I tried most of the solutions on the web, and only the following code worked for me:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# For working on GPUs from "TensorFlow Determinism"
os.environ["TF_DETERMINISTIC_OPS"] = str(seed)
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
Note that you should call this code before every run (at least in my case).
If you want to run your code on the CPU:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# Hide the GPUs so TensorFlow runs on the CPU only
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
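Since this block has to be called before every run, one convenience (my own wrapper, not part of the original answer) is to put it in a helper function and call it at the top of each cell:
def set_global_seed(seed=0):
    # call this at the start of every run, before building the model
    import os
    os.environ['PYTHONHASHSEED'] = str(seed)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    import random
    random.seed(seed)
    import numpy as np
    np.random.seed(seed)
    import tensorflow as tf
    tf.random.set_seed(seed)

set_global_seed(0)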
I've tried to get TensorFlow 2.0 working reproducibly using Keras and Google Colab (CPU), with a version of the Iris dataset processing similar to that described above by @malioboro. This seems to work - might be useful:
# Install TensorFlow
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
# Setup repro section from Keras FAQ with TF1 to TF2 adjustments
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
                                        inter_op_parallelism_threads=1)
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.compat.v1.set_random_seed(1234)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# Rest of code follows ...
# Some adopted from: https://janakiev.com/notebooks/keras-iris/
# Some adopted from the question.
#
# Load Data
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
iris = load_iris()
X = iris['data']
y = iris['target']
names = iris['target_names']
feature_names = iris['feature_names']
# One hot encoding
enc = OneHotEncoder()
Y = enc.fit_transform(y[:, np.newaxis]).toarray()
# Scale data to have mean 0 and variance 1
# which is important for convergence of the neural network
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data set into training and testing
X_train, X_test, Y_train, Y_test = train_test_split(
    X_scaled, Y, test_size=0.5, random_state=2)
n_features = X.shape[1]
n_classes = Y.shape[1]
## MAIN PROGRAM
from tensorflow.keras.layers import Dense, Activation
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(X_train, Y_train, epochs=20, batch_size=32)
#get prediction
classes = model.predict_classes(X_test)
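One caveat, not part of the original answer: Sequential.predict_classes was removed in later TensorFlow releases, so on a newer version the last line can be replaced with an argmax over the predicted probabilities:
# equivalent of predict_classes on newer TensorFlow versions
classes = np.argmax(model.predict(X_test), axis=-1)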

Keras, model.add() missing 1 required positional argument: 'layer'

I'm classifying digits of the MNIST dataset using a simple feed-forward neural net with Keras, so I execute the code below.
import os
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
# Path to Computation graphs
LOGDIR = './graphs_3'
# start session
sess = tf.Session()
#Hyperparameters
LEARNING_RATE = 0.01
BATCH_SIZE = 1000
EPOCHS = 10
# Layers
HL_1 = 1000
HL_2 = 500
# Other Parameters
INPUT_SIZE = 28*28
N_CLASSES = 10
model = Sequential
model.add(Dense(HL_1, input_dim=(INPUT_SIZE,), activation="relu"))
#model.add(Activation(activation="relu"))
model.add(Dense(HL_2, activation="relu"))
#model.add(Activation("relu"))
model.add(Dropout(rate=0.9))
model.add(Dense(N_CLASSES, activation="softmax"))
model.compile(
    optimizer="Adam",
    loss="categorical_crossentropy",
    metrics=['accuracy'])
# one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)
model.fit(
    x=mnist.train.images,
    y=mnist.train.labels,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE)
score = model.evaluate(
    x=mnist.test.images,
    y=mnist.test.labels)
print("score = ", score)
However, I get the following error:
model.add(Dense(1000, input_dim=(INPUT_SIZE,), activation="relu"))
TypeError: add() missing 1 required positional argument: 'layer'
The syntax is exactly as shown in the Keras docs. I am using Keras 2.0.9, so I don't think it's a versioning problem. Did I do something wrong?
It seems perfect indeed....
But I noticed you're not creating "an instance" of a Sequential model; you're using the class name instead:
#yours: model = Sequential
#correct:
model = Sequential()
Since methods in a class are always declared with self as the first argument, calling a method on the class itself (without an instance) means the first argument you pass is consumed as self, so the layer argument ends up missing.
The method's definition is def add(self, layer, ...):
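A small illustration of the difference, using a hypothetical class rather than Keras itself, to show why Python reports a missing layer argument:
class Container:
    def add(self, layer):
        print("added", layer)

Container().add("dense")    # instance call: self is bound automatically
try:
    Container.add("dense")  # class call: "dense" is consumed as self
except TypeError as err:
    print(err)              # add() missing 1 required positional argument: 'layer'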

Possible compatibility issue with Keras, TensorFlow and scikit (tf.global_variables())

I'm trying to do a small test with my dataset on Keras Regressor (using TensorFlow), but I'm having a small issue. The error seems to come from scikit-learn's cross_val_score function; the traceback starts there, and the last error message is:
File "/usr/local/lib/python2.7/dist-packages/Keras-2.0.2-py2.7.egg/keras/backend/tensorflow_backend.py", line 298, in _initialize_variables
variables = tf.global_variables()
AttributeError: 'module' object has no attribute 'global_variables'
My full code is basically the example found in http://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/ with small changes.
I've looked up the "'module' object has no attribute 'global_variables'" error and it seems to be related to the TensorFlow version, but I'm using the most recent one (1.0), and there is no function in the code that works directly with tf that I could change. Below is my full code. Is there any way I can change it so it works? Thanks for the help.
import numpy
import pandas
import sys
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_svmlight_file
# define base mode
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(68, activation="relu", kernel_initializer="normal", input_dim=68))
    model.add(Dense(1, kernel_initializer="normal"))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
X, y, query_id = load_svmlight_file(str(sys.argv[1]), query_id=True)
scaler = StandardScaler()
X = scaler.fit_transform(X.toarray())
# fix random seed for reproducibility
seed = 1
numpy.random.seed(seed)
# evaluate model with standardized dataset
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=5, verbose=0)
kfold = KFold(n_splits=5, random_state=seed)
results = cross_val_score(estimator, X, y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
You are probably using an older TensorFlow version. Install tensorflow 1.2.0rc2 and you should be fine.
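To confirm which version the script is actually importing before upgrading, a quick check (the version pin below simply follows the suggestion above):
import tensorflow as tf
print(tf.__version__)  # tf.global_variables() only exists in sufficiently recent releases
# if the printed version is too old, upgrade in the same environment:
# pip install --upgrade tensorflow==1.2.0rc2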