Possible compatibility issue with Keras, TensorFlow and scikit (tf.global_variables()) - tensorflow

I'm trying to do a small test with my dataset on Keras Regressor (using TensorFlow), but I'm having a small issue. The error seems to originate in scikit-learn's cross_val_score function; the traceback starts there, and the last error message is:
File "/usr/local/lib/python2.7/dist-packages/Keras-2.0.2-py2.7.egg/keras/backend/tensorflow_backend.py", line 298, in _initialize_variables
variables = tf.global_variables()
AttributeError: 'module' object has no attribute 'global_variables'
My full code is basically the example found in http://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/ with small changes.
I've looked into the "'module' object has no attribute 'global_variables'" error and it seems to be related to the TensorFlow version, but I'm using the most recent one (1.0), and there is no function in my code that works directly with tf that I could change. Below is my full code; is there any way I can change it so it works? Thanks for the help.
import numpy
import pandas
import sys
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_svmlight_file
# define base model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(68, activation="relu", kernel_initializer="normal", input_dim=68))
    model.add(Dense(1, kernel_initializer="normal"))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
X, y, query_id = load_svmlight_file(str(sys.argv[1]), query_id=True)
scaler = StandardScaler()
X = scaler.fit_transform(X.toarray())
# fix random seed for reproducibility
seed = 1
numpy.random.seed(seed)
# evaluate model with standardized dataset
estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=100, batch_size=5, verbose=0)
kfold = KFold(n_splits=5, random_state=seed)
results = cross_val_score(estimator, X, y, cv=kfold)
print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))

You are probably using an older TensorFlow version. Install tensorflow 1.2.0rc2 and you should be fine.
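As a sanity check, it helps to print the version the script actually imports, since a stale install elsewhere on the path is a common cause; tf.global_variables appeared in TF 0.12 (renamed from tf.all_variables), so this AttributeError means the imported version predates that rename. A minimal sketch:
import tensorflow as tf
# tf.global_variables() exists from TF 0.12 onward (renamed from tf.all_variables),
# so anything older raises the AttributeError above
print(tf.__version__)
If this prints a pre-0.12 version, upgrading (for example pip install --upgrade tensorflow==1.2.0rc2) resolves the missing attribute.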

Related

Lambda function in tensorflow to do some changes to the data inside the model

My goal is to change the data inside the model using a lambda function.
The code fails at the last part, in model.predict.
Can someone help me fix this, or share a similar working example if you have one?
import glob
import tensorflow as tf
import tensorflow_hub as hub
from os.path import basename, join
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import Adam
from keras import layers
from sklearn.metrics import auc, average_precision_score
import numpy as np
import base64
import cv2
# this is the function that I want to call inside the model
# it takes the data, which is a base64-encoded image, and decodes it back into a numpy array
def base64_decoder(inputs):
    binary_data = base64.b64decode(inputs)
    img = cv2.imdecode(np.frombuffer(binary_data, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
    return img
# this is the model I'm using to test
model_handle = "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/feature_vector/2"
INPUT_SIZE = (331, 331, 3)
model = tf.keras.Sequential([
    tf.keras.layers.Lambda(base64_decoder),
    tf.keras.layers.InputLayer(input_shape=INPUT_SIZE),
    hub.KerasLayer(model_handle, trainable=True),
    tf.keras.layers.Dropout(rate=0.2),
    layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=[tf.keras.metrics.AUC(curve='PR')])
# now I will test the model
im = np.random.rand(331, 331, 3)
img = np.expand_dims(im, axis=0)
# encode the random image
_, buffer = cv2.imencode('.jpg', im)
jpg_as_text = base64.b64encode(buffer)
value = jpg_as_text.decode('utf-8')
# predict
print(model.predict(value))
Also, I may change the lambda function to a full custom function; what do you think?
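No answer was posted in the original thread, but as a hedged sketch of the usual approach: a Lambda layer runs on symbolic tensors inside the TensorFlow graph, so plain-Python calls such as base64.b64decode and cv2.imdecode cannot execute there; TF's own ops are needed. The tf_base64_decoder helper below is hypothetical, and it assumes JPEG input and web-safe base64 (tf.io.decode_base64 expects the URL-safe alphabet that base64.urlsafe_b64encode produces); fn_output_signature needs TF 2.3+.
import tensorflow as tf
def tf_base64_decoder(b64_batch):
    # graph-compatible decoding: base64 string -> JPEG bytes -> float image tensor
    def decode_one(b64_string):
        binary = tf.io.decode_base64(b64_string)
        img = tf.io.decode_jpeg(binary, channels=3)
        return tf.image.resize(img, (331, 331))
    return tf.map_fn(decode_one, b64_batch, fn_output_signature=tf.float32)
The model would then start with tf.keras.Input(shape=(), dtype=tf.string) feeding this Lambda, rather than an InputLayer placed after it, and model.predict would receive a batch of strings (e.g. np.array([value])).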

Keras and tensorflow conflict when transfer learning on MobileNetV3

I'm trying to do transfer learning with MobileNetV3 in Keras but I'm having some issues.
from keras.models import Model
from keras.layers import GlobalMaxPooling2D, Dense, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications import MobileNetV3Small
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
pretrained_model = MobileNetV3Small(input_shape=(224,224,3),
                                    weights="imagenet",
                                    include_top=False)
# freeze all layers except the last one
for layer in pretrained_model.layers:
    layer.trainable = False
pretrained_model.layers[-1].trainable = True
# combine the model with some extra layers for classification
last_output = pretrained_model.layers[-1].output
x = GlobalMaxPooling2D()(last_output)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(pretrained_model.input, x)
I get this error when I try to make the Dense layer:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
but it's fixed by adding the following code snippet:
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
When I include the code fix above, I get this error when I call model.fit():
FailedPreconditionError: 2 root error(s) found.
(0) Failed precondition: Could not find variable Conv_1_2/kernel. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Resource localhost/Conv_1_2/kernel/N10tensorflow3VarE does not exist.
[[{{node Conv_1_2/Conv2D/ReadVariableOp}}]]
[[_arg_dense_12_target_0_1/_100]]
(1) Failed precondition: Could not find variable Conv_1_2/kernel. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Resource localhost/Conv_1_2/kernel/N10tensorflow3VarE does not exist.
[[{{node Conv_1_2/Conv2D/ReadVariableOp}}]]
0 successful operations.
0 derived errors ignored.
How can I fix these issues and train the model?
From the comments:
Don't mix tf.keras and standalone keras. They are not compatible; use only one of them. (Paraphrased from Frightera.)
Working code is shown below:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalMaxPooling2D, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications import MobileNetV3Small
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
pretrained_model = MobileNetV3Small(input_shape=(224,224,3),
                                    weights="imagenet",
                                    include_top=False)
# freeze all layers except the last one
for layer in pretrained_model.layers:
    layer.trainable = False
pretrained_model.layers[-1].trainable = True
# combine the model with some extra layers for classification
last_output = pretrained_model.layers[-1].output
x = GlobalMaxPooling2D()(last_output)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(pretrained_model.input, x)
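For completeness, a minimal sketch of compiling and training the corrected model; the optimizer, loss, and the train_images/train_labels names are illustrative assumptions, not from the original post:
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# train_images: float32 array of shape (N, 224, 224, 3); train_labels: 0/1 array
model.fit(train_images, train_labels, validation_split=0.1,
          epochs=5, batch_size=32,
          callbacks=[ModelCheckpoint('best.h5', save_best_only=True)])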

Input 0 of layer fc1 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 32768)

I'm implementing SRGAN (and am not very experienced in this field), which uses a pre-trained VGG19 model to extract features. The following code was working fine on Keras 2.1.2 and tf 1.15.0 until yesterday, when it started throwing "AttributeError: module 'keras.utils.generic_utils' has no attribute 'populate_dict_with_module_objects'". So I updated Keras to 2.4.3 and tf to 2.5.0, but now it shows "Input 0 of layer fc1 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 32768)" on the following line:
features = vgg(input_layer)
But here the input has to be (256, 256, 3).
I had downgraded the Keras and tf versions to the ones mentioned above to get rid of this error in the first place, and it was working well until yesterday.
Changing the input shape to (224, 224, 3) does not work. Any help in solving this error would be much appreciated.
import glob
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input
from keras.applications.vgg19 import VGG19
from keras.callbacks import TensorBoard
from keras.layers import BatchNormalization, Activation, LeakyReLU, Add, Dense,Flatten
from keras.layers.convolutional import Conv2D, UpSampling2D
from keras.models import Model
from keras.optimizers import Adam
from scipy.misc import imread, imresize
from PIL import Image
def build_vgg():
    input_shape = (256, 256, 3)
    vgg = VGG19(weights="imagenet")
    vgg.outputs = [vgg.layers[9].output]
    input_layer = Input(shape=input_shape)
    features = vgg(input_layer)
    model = Model(inputs=[input_layer], outputs=[features])
    return model
vgg = build_vgg()
vgg.trainable = False
vgg.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build and compile the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build the generator network
generator = build_generator()
The error message is quoted above. I'm using Google Colab.
Importing keras from tensorflow and setting include_top=False in
vgg = VGG19(weights="imagenet", include_top=False)
seems to work.
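A hedged sketch of what the corrected build_vgg might look like under that suggestion; with include_top=False the fully connected fc1/fc2 layers are gone, so the 25088-vs-32768 shape mismatch disappears. The layers[9] index is carried over from the question as-is:
from tensorflow.keras.applications import VGG19
from tensorflow.keras.models import Model
def build_vgg():
    vgg = VGG19(weights="imagenet", include_top=False,
                input_shape=(256, 256, 3))
    # expose the same intermediate activation the original indexed as layers[9]
    return Model(inputs=vgg.input, outputs=vgg.layers[9].output)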

Tensorflow-Keras reproducibility problem on Google Colab

I have some simple code to run on Google Colab (I use CPU mode):
import numpy as np
import pandas as pd
## LOAD DATASET
datatrain = pd.read_csv("gdrive/My Drive/iris_train.csv").values
xtrain = datatrain[:,:-1]
ytrain = datatrain[:,-1]
datatest = pd.read_csv("gdrive/My Drive/iris_test.csv").values
xtest = datatest[:,:-1]
ytest = datatest[:,-1]
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.utils import to_categorical
## SET ALL SEED
import os
os.environ['PYTHONHASHSEED']=str(66)
import random
random.seed(66)
np.random.seed(66)
tf.set_random_seed(66)
from tensorflow.keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
## MAIN PROGRAM
ycat = to_categorical(ytrain)
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(xtrain, ycat, epochs=15, batch_size=32)
#get prediction
classes = model.predict_classes(xtest)
#get accuration
accuration = np.sum(classes == ytest)/len(ytest) * 100
I have read the setup for creating reproducible code here: Reproducible results using Keras with TensorFlow backend, and I put all the code in the same cell. But the result (e.g. the loss) is always different every time I run that cell (using Shift + Enter).
In my case, the result from the code above can only be reproduced if:
I run it using "Runtime" > "Restart and run all", or
I put the code in a single file and run it from the command line (python3 file.py).
Is there something I'm missing that would make the result reproducible without restarting the runtime?
You should also fix the seed for the kernel_initializer in your Dense layers, so your model will look like:
model = tf.keras.Sequential()
model.add(Dense(10, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66), input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3, kernel_initializer=tf.keras.initializers.glorot_uniform(seed=66)))
model.add(Activation("softmax"))
I tried most of the solutions on the web, and only the following code worked for me:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# For deterministic ops on GPUs, from "TensorFlow Determinism"
os.environ["TF_DETERMINISTIC_OPS"] = "1"
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
Note that you should run this code before every run (at least in my case). If you want to run your code on the CPU:
seed=0
import os
os.environ['PYTHONHASHSEED'] = str(seed)
# Hide GPUs so TensorFlow falls back to the CPU
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import numpy as np
np.random.seed(seed)
import random
random.seed(seed)
import tensorflow as tf
tf.random.set_seed(seed)
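On newer TensorFlow releases (roughly 2.7+ for set_random_seed and 2.8+ for enable_op_determinism; worth verifying against your install), the same setup condenses to two calls. A minimal sketch:
import tensorflow as tf
seed = 0
tf.keras.utils.set_random_seed(seed)  # seeds Python's random, NumPy and TensorFlow together
tf.config.experimental.enable_op_determinism()  # request deterministic op implementations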
I've tried to get TensorFlow 2.0 working reproducibly using Keras and Google Colab (CPU), with a version of the Iris dataset processing similar to that described above by @malioboro. This seems to work; it might be useful:
# Install TensorFlow
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
# Setup repro section from Keras FAQ with TF1 to TF2 adjustments
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1,
                                        inter_op_parallelism_threads=1)
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.compat.v1.set_random_seed(1234)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)
# Rest of code follows ...
# Some adopted from: https://janakiev.com/notebooks/keras-iris/
# Some adopted from the question.
#
# Load Data
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
iris = load_iris()
X = iris['data']
y = iris['target']
names = iris['target_names']
feature_names = iris['feature_names']
# One hot encoding
enc = OneHotEncoder()
Y = enc.fit_transform(y[:, np.newaxis]).toarray()
# Scale data to have mean 0 and variance 1,
# which is important for convergence of the neural network
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data set into training and testing
X_train, X_test, Y_train, Y_test = train_test_split(
    X_scaled, Y, test_size=0.5, random_state=2)
n_features = X.shape[1]
n_classes = Y.shape[1]
## MAIN PROGRAM
from tensorflow.keras.layers import Dense, Activation
# build model
model = tf.keras.Sequential()
model.add(Dense(10, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("softmax"))
#choose optimizer and loss function
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# train
model.fit(X_train, Y_train, epochs=20, batch_size=32)
#get prediction
classes = model.predict_classes(X_test)
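One caveat not from the original answer: predict_classes was removed from tf.keras around TensorFlow 2.6, so on a current install the last line needs the usual replacement:
classes = np.argmax(model.predict(X_test), axis=-1)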

Best method for defining layers using tf keras api?

The tensorflow.keras API is not working when I create a reference to the layers module. Is there any other way to create a layers reference?
code :
layer=keras.layers
Error message : NameError: name 'leyer' is not defined
Full code is pasted here...
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
import numpy as np
# making seed values
seed=7
np.random.seed(seed)
#setting up the dataset for training
dataframe=pd.read_csv("../datasets/iris.csv",header=None)
data=dataframe.values
input_x = data[:,0:4]
true_y = data[:,4]
#Encoding the true_y data to one hot encoding
le=LabelEncoder()
le.fit(true_y)
y_encoded = le.transform(true_y)
y_encoded = keras.utils.to_categorical(y_encoded,num_classes=3)
# creating the model
def base_fun():
    layer = keras.layers
    model = keras.models.Sequential()
    model.add(layer.Dense(4, input_dim=4, kernel_initializer='normal', activation='relu'))
    model.add(leyer.Dnese(3, kernel_initializer='normal', activation='relu'))
estimator=keras.wrappers.scikit_learn.KerasClassifier(build_fn=base_fun,epochs=20,batch_size=10)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
result = cross_val_score(estimator, input_x, y_encoded,cv=kfold)
print("Accuracy : %.2%% (%.2%%)" %(result.mean()*100, result.std()*100))
Well, this line:
model.add(leyer.Dnese(3, kernel_initializer='normal', activation='relu'))
has two typos: leyer should be layer, and Dnese should be Dense, like
model.add(layer.Dense(3, kernel_initializer='normal', activation='relu'))
Based on your comment, this line also causes an error:
estimator = keras.wrappers.scikit_learn.KerasClassifier( build_fn = base_fun, epochs = 20, batch_size = 10 )
From the Keras Scikit documentation:
build_fn should construct, compile and return a Keras model, which will then be used to fit/predict.
But your function base_fun() does not return anything. Append this line at the end of base_fun():
return model
As per your comment, the last print line could be changed to this (I'm not sure about %-formatting; I generally use the syntax below):
print( "Accuracy : {:.2%} ({:.2%})".format( result.mean(), result.std() ) )