I have a TensorFlow model written using model subclassing and I want to export it to ONNX format. This is simple enough with the script below. However, the name of the input variable to the ONNX model is args_0. How can I rename it?
import tensorflow as tf
import tf2onnx
from tensorflow.python.keras import Model
from tensorflow.python.keras.layers import Dense


class MyModel(Model):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dense = Dense(16)

    def call(self, inputs, **kwargs):
        return self.dense(inputs)

    def to_onnx(self, output_path, opset=14):
        model_proto, _ = tf2onnx.convert.from_keras(
            self,
            input_signature=[tf.TensorSpec((1, 128))],
            opset=opset,
            output_path=output_path,
        )
        return


if __name__ == "__main__":
    output_path = "./test.onnx"
    A = MyModel()
    A.to_onnx(output_path)
You can provide the input name in input_signature via name="input_name", so it should look like:
input_signature=[tf.TensorSpec((1, 128), name="input_name")],
as shown in this notebook
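For completeness, here is a minimal sketch of the full conversion with a named input (the name input_name and the check with the onnx package are illustrative additions, not from the original answer):

import onnx
import tensorflow as tf
import tf2onnx

# The name given to the TensorSpec becomes the ONNX graph input name.
spec = [tf.TensorSpec((1, 128), tf.float32, name="input_name")]

model = MyModel()  # the subclassed model from the question
model_proto, _ = tf2onnx.convert.from_keras(
    model, input_signature=spec, opset=14, output_path="./test.onnx"
)

# Verify the input name in the exported graph.
loaded = onnx.load("./test.onnx")
print([i.name for i in loaded.graph.input])  # expected: ['input_name']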
I created a neural network model and built an ensemble (voting) model that combines it with a random forest and XGBoost. I saved the model, but when I try to load it in another Jupyter notebook I get this error: AttributeError: Can't get attribute 'create_model' on <module '__main__'>
Here is the code that creates the models in the first notebook:
import numpy
import pickle

from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier


# Function to create model, required for KerasClassifier
def create_model(input_shape=66):
    # create model
    model = Sequential()
    model.add(Dense(12, input_dim=input_shape, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model


seed = 7
numpy.random.seed(seed)

Kc_model = KerasClassifier(
    create_model,      # Pass in function
    input_shape=66,    # Pass in the dimensions to above function
    epochs=100,
    batch_size=32,
    verbose=False)
Kc_model._estimator_type = "classifier"
Kc_model.fit(x_train, y_train, epochs=100, batch_size=10)

rf = RandomForestClassifier(max_depth=15, random_state=0)
rf.fit(x_train, y_train)
rf_y_pred = rf.predict(x_test)

# Model Score
print("The accuracy score for Random Forest Classifier is")
print("Accuracy: {}%".format(round(metrics.accuracy_score(y_test, rf_y_pred) * 100)))
print("Training: {}%".format(round(rf.score(x_train, y_train) * 100)))
print("Test set: {}%".format(round(rf.score(x_test, y_test) * 100)))

xgboost_model = XGBClassifier()
xgboost_model.fit(x_train, y_train)
xgboost_y_pred = xgboost_model.predict(x_test)

print("The accuracy score for XGB Classifier is")
print("Accuracy: {}%".format(round(metrics.accuracy_score(y_test, xgboost_y_pred) * 100)))
print("Training: {}%".format(round(xgboost_model.score(x_train, y_train) * 100)))
print("Test set: {}%".format(round(xgboost_model.score(x_test, y_test) * 100)))

voting = VotingClassifier(
    estimators=[('rf', rf), ('xgboost_model', xgboost_model), ('Kc_model', Kc_model)],
    voting='soft')

voting_model = voting.fit(x_train, y_train)
voting_pred = voting_model.predict(x_test)

# Model Score
print("The accuracy score for Voting Classifier is")
print("Training: {}%".format(round(voting_model.score(x_train, y_train) * 100)))
print("Test set: {}%".format(round(voting_model.score(x_test, y_test) * 100)))

# save
with open('voting_model.pkl', 'wb') as f:
    pickle.dump(Kc_model, f)
In the second notebook, where I try to load the model, I get the error mentioned above:
import pickle
import pandas as pd

with open('voting_model.pkl', 'rb') as f:
    Kc_model = pickle.load(f)
The reason this happens is that the keras.wrappers.scikit_learn.KerasClassifier wrapper cannot be pickled as-is: the model-building function is not saved with it. Instead, you should pickle the fitted Keras model itself:
import pickle

# save
with open('voting_model.pkl', 'wb') as f:
    pickle.dump(Kc_model.model, f)
Now, you can load your model and use it as you wish.
with open('voting_model.pkl', 'rb') as f:
    model = pickle.load(f)

# Predict something.
model.predict(X_test)
However, if you need a KerasClassifier instance after loading, you should re-wrap it. You also need to save the classes_ attribute. The build function then simply returns the loaded pickle:
# Save this as well.
with open('voting_model_classes.pkl', 'wb') as f:
    pickle.dump(Kc_model.classes_, f)
import pickle
from keras.wrappers.scikit_learn import KerasClassifier


def load_model():
    with open('voting_model.pkl', 'rb') as f:
        return pickle.load(f)


def load_classes():
    with open('voting_model_classes.pkl', 'rb') as f:
        return pickle.load(f)


Kc_model = KerasClassifier(
    load_model,
    epochs=100,
    batch_size=32,
    verbose=False)
Kc_model._estimator_type = "classifier"
# We need to set these manually because they would otherwise only be set
# once the classifier is re-fitted.
Kc_model.model = load_model()
Kc_model.classes_ = load_classes()
# Now you can use Kc_model as a KerasClassifier.
The error is expected: the model building function gets pickled by name, and that name doesn't exist in your new notebook.
You could try SciKeras, which has an initialize method (docs) that you can call to restore attributes like classes_ if you choose to serialize your Keras model with SavedModel directly (SciKeras's KerasClassifier will gladly accept a model instance).
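A minimal sketch of that route, assuming SciKeras is installed and the Keras model was saved separately with model.save("my_model"); the file and variable names are illustrative, not from the original answer:

import tensorflow as tf
from scikeras.wrappers import KerasClassifier

# Load the Keras model that was saved with SavedModel (e.g. model.save("my_model")).
keras_model = tf.keras.models.load_model("my_model")

# SciKeras's KerasClassifier accepts a built Model instance directly.
clf = KerasClassifier(model=keras_model, epochs=100, batch_size=32, verbose=False)

# initialize() restores fitted attributes such as classes_ without re-training,
# using (a sample of) the training data.
clf.initialize(x_train, y_train)

clf.predict(x_test)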
I want to do what the BatchNormalization layer does in Keras: remove the mean and keep a moving average of it. Unfortunately, the BatchNormalization layer in Keras always normalizes by the variance as well, and I don't want to use that.
I was thinking of using Average and Subtract layers, but they don't store anything for use once training has ended. The idea is that my layer removes and learns the mean, so that when predicting on test data it subtracts a constant value.
I created a Centering layer to do this, copied from the BatchNormalization code. It uses momentum to update the current moving-mean value. It seems to work, and I can save and load models with it.
from tensorflow.keras import backend
from tensorflow.keras import initializers
from tensorflow.keras import layers
from tensorflow import math
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables


class Centering(layers.Layer):
    """Layer that centers the data learning a mean."""

    def __init__(self, momentum=0.01, **kwargs):
        """Constructor of Centering."""
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super().__init__(**kwargs)
        self.input_spec = layers.InputSpec(min_ndim=2)
        self.momentum = momentum
        self.moving_mean = None

    def build(self, input_shape):
        """Create internal variables."""
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.moving_mean = self.add_weight(
            name='moving_mean',
            shape=(input_dim,),
            initializer=initializers.Zeros,
            synchronization=variables.VariableSynchronization.ON_READ,
            trainable=False,
            aggregation=variables.VariableAggregation.MEAN,
            experimental_autocast=False)
        self.input_spec = layers.InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def _get_training_value(self, training=None):
        """Copied from normalization.py."""
        if training is None:
            training = backend.learning_phase()
        if isinstance(training, int):
            training = bool(training)
        if not self.trainable:
            # When the layer is not trainable, it overrides the value
            # passed from model.
            training = False
        return training

    def _support_zero_size_input(self):
        """Copied from normalization.py."""
        return distribution_strategy_context.has_strategy() and getattr(
            distribution_strategy_context.get_strategy().extended,
            'experimental_enable_get_next_as_optional', False)

    def _assign_moving_average(self, variable, value, momentum, inputs_size):
        """Copied from normalization.py."""
        with backend.name_scope('AssignMovingAvg') as scope:
            with ops.colocate_with(variable):
                decay = ops.convert_to_tensor_v2(1.0 - momentum, name='decay')
                if decay.dtype != variable.dtype.base_dtype:
                    decay = math_ops.cast(decay, variable.dtype.base_dtype)
                update_delta = (variable -
                                math_ops.cast(value, variable.dtype)) * decay
                if inputs_size is not None:
                    update_delta = array_ops.where(
                        inputs_size > 0, update_delta,
                        backend.zeros_like(update_delta))
                return state_ops.assign_sub(variable, update_delta, name=scope)

    def call(self, inputs, training=None, **kwargs):
        """Called for each mini batch when applied to input layer."""
        training = self._get_training_value(training)
        training_value = tf_utils.constant_value(training)
        if training_value == False:
            mean = self.moving_mean
        else:
            mean = math.reduce_mean(inputs, axis=0)

        # Following code copied from normalization.py to update moving mean
        if self._support_zero_size_input():
            # Keras assumes that batch dimension is the first dimension for
            # Batch Normalization.
            input_batch_size = array_ops.shape(inputs)[0]
        else:
            input_batch_size = None

        def mean_update():
            """Perform update of moving mean average using copied code."""
            self._assign_moving_average(
                self.moving_mean, mean, self.momentum, input_batch_size)

        self.add_update(mean_update)
        # Center inputs
        return inputs - mean

    def get_config(self):
        """Internal config of this layer."""
        config = {
            'momentum': self.momentum,
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
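For illustration, here is a minimal usage sketch of the layer above (not from the original post; the data and layer sizes are made up, and it assumes the imports above resolve in your TensorFlow version):

import numpy as np
import tensorflow as tf

# Toy data with a non-zero mean so the centering is visible.
x = np.random.normal(loc=5.0, size=(256, 8)).astype("float32")
y = np.random.normal(size=(256, 1)).astype("float32")

model = tf.keras.Sequential([
    tf.keras.Input(shape=(8,)),
    Centering(momentum=0.01),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse")
model.fit(x, y, epochs=5, verbose=0)

# At inference time the layer subtracts the learned moving mean.
print(model.layers[0].moving_mean.numpy())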
You can also disable scaling in BatchNormalization: gamma is a learned scaling factor (initialized to 1), and it can be disabled by passing scale=False to the constructor.
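A one-line sketch of that option (note that scale=False only drops the learned gamma; BatchNormalization still divides by the moving variance):

import tensorflow as tf

# center=True keeps the learned beta offset; scale=False removes gamma.
bn = tf.keras.layers.BatchNormalization(scale=False, center=True)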
I am searching for a layer that performs an element-wise division of the input, but of course the parameters of this division must be learned, just like those of a standard Conv2D layer.
I found this:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Multiply
but I don't think it's what I want, because I want the division parameters to be learned, not to divide two layers by each other.
With a dense layer, dot products are computed, which is NOT what I want. I am looking for ELEMENT-WISE multiplication/division.
Sample code for a custom layer that performs an element-wise division of the input, with the parameters (weights) of that division learned during training, is shown below:
%tensorflow_version 2.x
from tensorflow import keras
from tensorflow.keras import Input
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.models import Model, Sequential
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer
import numpy as np


class MyLayer(Layer):
    def __init__(self, output_dims, **kwargs):
        self.output_dims = output_dims
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=self.output_dims,
                                      initializer='ones',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this somewhere!

    def call(self, x):
        # Dividing Input with Weights
        return tf.divide(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return self.output_dims


mInput = np.array([[1, 2, 3, 4]])
inShape = (4,)
net = Sequential()
outShape = (4,)
l1 = MyLayer(outShape, input_shape=inShape)
net.add(l1)
net.compile(loss='mean_absolute_error', optimizer='adam', metrics=['accuracy'])
p = net.predict(x=mInput, batch_size=1)
print(p)
Hope this helps. Happy Learning!
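As a quick, hypothetical sanity check (not part of the original answer), you can train the layer to reproduce a known element-wise division and watch the kernel converge:

import numpy as np

# Train x -> x / [1, 2, 3, 4]; the learned kernel should move toward [1, 2, 3, 4].
x_train = np.random.rand(1024, 4).astype("float32")
y_train = x_train / np.array([1.0, 2.0, 3.0, 4.0], dtype="float32")

net.fit(x_train, y_train, epochs=200, batch_size=32, verbose=0)
print(net.layers[0].get_weights()[0])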
I want to create a custom layer that takes, in __init__, an internal tensor and a custom dot function, so that for a given batch it computes the dot function over all possible pairs formed between the batch and the internal tensor.
If I were to use the natural inner product, I could directly write tf.matmul(inputs, self.internal_tensor, transpose_b=True), but I want to be able to supply other kernel functions.
MWE:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer


class CustomLayer(Layer):
    def __init__(self, internal_tensor, kernel, **kwargs):
        super().__init__(**kwargs)
        self.internal_tensor = tf.Variable(0., shape=tf.TensorShape((None, 10)),
                                           validate_shape=False, name='internal_tensor')
        self.internal_tensor.assign(internal_tensor)
        self.kernel = kernel

    @tf.function
    def call(self, inputs, **kwargs):
        return self.kernel([
            tf.reshape(tf.tile(inputs, [1, self.internal_tensor.shape[0]]),
                       [-1, inputs.shape[1]]),  # because no tf.repeat
            tf.tile(self.internal_tensor, [inputs.shape[0], 1]),
        ])


custom_layer = CustomLayer(
    internal_tensor=tf.convert_to_tensor(np.random.rand(30, 10), tf.float32),
    kernel=lambda inputs: inputs[0] + inputs[1],
)
x = np.random.rand(15, 10).astype(np.float32)
custom_layer(x)
# TypeError: Failed to convert object of type <class 'list'> to Tensor.
# Contents: [1, None]. Consider casting elements to a supported type.
For the sake of clarity, here is the target working layer in Numpy:
class NumpyLayer:
    def __init__(self, internal_tensor, kernel):
        self.internal_tensor = internal_tensor
        self.kernel = kernel

    def __call__(self, inputs):
        return self.kernel([
            np.repeat(inputs, len(self.internal_tensor), axis=0),
            np.tile(self.internal_tensor, (len(inputs), 1)),
        ])


numpy_layer = NumpyLayer(
    internal_tensor=internal_tensor,
    kernel=lambda inputs: inputs[0] + inputs[1],
)
numpy_layer(x)
So all the trouble came from using tf.Tensor.shape instead of tf.shape(tensor).
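As a quick illustration of the difference (a sketch, not from the original answer): .shape is the static shape and may contain None for dimensions only known at run time, while tf.shape() returns a tensor holding the actual run-time sizes.

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)])
def show_shapes(t):
    print("static shape:", t.shape)          # (None, 10): batch dim unknown at trace time
    tf.print("dynamic shape:", tf.shape(t))  # e.g. [15 10] at run time
    return t

show_shapes(tf.zeros((15, 10)))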
Here is a working solution:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer


class CustomLayer(Layer):
    def __init__(self, internal_tensor, kernel, **kwargs):
        super().__init__(**kwargs)
        self.internal_tensor = tf.Variable(0., shape=tf.TensorShape((None, None)),
                                           validate_shape=False, name='internal_tensor')
        self.internal_tensor.assign(internal_tensor)
        self.kernel = kernel

    @tf.function
    def call(self, inputs, **kwargs):
        batch_size = tf.shape(inputs)[0]
        return self.kernel([
            tf.reshape(tf.tile(inputs, [1, tf.shape(self.internal_tensor)[0]]),
                       [-1, inputs.shape[1]]),  # because no tf.repeat
            tf.tile(self.internal_tensor, [batch_size, 1]),
        ])


internal_tensor = np.random.rand(30, 10)

custom_layer = CustomLayer(
    internal_tensor=tf.convert_to_tensor(internal_tensor, tf.float32),
    kernel=lambda inputs: inputs[0] + inputs[1],
)
x = np.random.rand(10, 10).astype(np.float32)
custom_layer(x)
though there is still a warning:
WARNING:tensorflow:Entity <bound method CustomLayer.call of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f8e7e2d8400>> could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method CustomLayer.call of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f8e7e2d8400>>: ValueError: Unable to locate the source code of <bound method CustomLayer.call of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f8e7e2d8400>>. Note that functions defined in certain environments, like the interactive Python shell do not expose their source code. If that is the case, you should to define them in a .py source file. If you are certain the code is graph-compatible, wrap the call using @tf.autograph.do_not_convert. Original error: could not get source code
I am attempting to use the Gamma distribution from tfp in a custom Keras loss function via its log_prob method, but the function always returns nan when training starts.
I have tested the loss function on its own and it seems to work fine:
import tensorflow as tf
import tensorflow_probability as tfp

tf.enable_eager_execution()


def gamma_loss(y_true, alpha, beta):
    gamma_distr = tfp.distributions.Gamma(concentration=alpha, rate=beta)
    log_lik_gamma = gamma_distr.log_prob(y_true)
    return -tf.reduce_mean(log_lik_gamma)


gamma_loss(100, 2, 2).numpy()
# 194.00854
The problem may be related to the parameters (alpha and beta) that I am passing to the function, which are produced by the final (custom) layer of the model I am using.
This is the full snippet:
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Dense, Layer, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import glorot_normal
import tensorflow_probability as tfp
from sklearn.datasets import make_regression


class GammaLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(GammaLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        n_weight_rows = 4
        self.kernel_2 = self.add_weight(name='kernel_2',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.kernel_3 = self.add_weight(name='kernel_3',
                                        shape=(n_weight_rows, self.output_dim),
                                        initializer=glorot_normal(),
                                        trainable=True)
        self.bias_2 = self.add_weight(name='bias_2',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        self.bias_3 = self.add_weight(name='bias_3',
                                      shape=(self.output_dim,),
                                      initializer=glorot_normal(),
                                      trainable=True)
        super(GammaLayer, self).build(input_shape)

    def call(self, x):
        # Here I use softplus to make the parameters strictly positive
        alpha = tf.math.softplus(K.dot(x, self.kernel_2) + self.bias_2)
        beta = tf.math.softplus(K.dot(x, self.kernel_3) + self.bias_3)
        return [alpha, beta]

    def compute_output_shape(self, input_shape):
        """
        The assumption is that the output is always one-dimensional
        """
        return [(input_shape[0], self.output_dim), (input_shape[0], self.output_dim)]


def gamma_loss(y_true, y_pred):
    alpha, beta = y_pred[0], y_pred[1]
    gamma_distr = tfp.distributions.Gamma(concentration=alpha, rate=beta)
    return -tf.reduce_mean(gamma_distr.log_prob(y_true))


X, y = make_regression(n_samples=1000, n_features=3, noise=0.1)

inputs = Input(shape=(3,))
x = Dense(6, activation='relu')(inputs)
x = Dense(4, activation='relu')(x)
x = GammaLayer(1, name='main_output')(x)
output_params = Concatenate(1, name="pvec")(x)

model = Model(inputs, output_params)
model.compile(loss=gamma_loss, optimizer='adam')
model.fit(X, y, epochs=30, batch_size=10)
Can you try adding an additional 1e-6 or so outside the softplus? For very negative values, softplus becomes quite close to zero.
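A sketch of that suggestion applied to GammaLayer.call from the snippet above (the 1e-6 constant is the answerer's proposal; everything else mirrors the original code):

    def call(self, x):
        # Softplus keeps alpha and beta positive; the small epsilon keeps them
        # strictly away from zero, where Gamma's log_prob can produce nan/inf.
        eps = 1e-6
        alpha = tf.math.softplus(K.dot(x, self.kernel_2) + self.bias_2) + eps
        beta = tf.math.softplus(K.dot(x, self.kernel_3) + self.bias_3) + eps
        return [alpha, beta]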