This is a similar question to: How to create a keras layer with a custom gradient in TF2.0? Only here, I would like to introduce a learnable parameter into the custom layer that I am training.
Here's a toy example of my current approach:
import tensorflow as tf

# Method for calculating the custom gradient
@tf.custom_gradient
def scaler(x, s):
    def grad(upstream):
        dy_dx = s
        dy_ds = x
        return dy_dx, dy_ds
    return x * s, grad
# Keras layer with a trainable parameter
class TestLayer(tf.keras.layers.Layer):
    def build(self, input_shape):
        self.scale = self.add_weight("scale",
                                     shape=[1,],
                                     initializer=tf.keras.initializers.Constant(value=2.0),
                                     trainable=True)

    def call(self, inputs):
        return scaler(inputs, self.scale)

# Creates a Keras model that uses the layer
def Model():
    x_in = tf.keras.layers.Input(shape=(1,))
    x_out = TestLayer()(x_in)
    return tf.keras.Model(inputs=x_in, outputs=x_out, name="fp8_test")

# Create a toy dataset; we want to learn `scale` such that 5 = 2 * scale
# (i.e., `scale` should learn ~2.5)
def Dataset():
    inps = tf.ones(shape=(10**5,)) * 2      # inputs
    expected = tf.ones(shape=(10**5,)) * 5  # targets
    data_in = tf.data.Dataset.from_tensors(inps)
    data_exp = tf.data.Dataset.from_tensors(expected)
    dataset = tf.data.Dataset.zip((data_in, data_exp))
    return dataset

model = Model()
model.summary()
dataset = Dataset()

# Use `MSE` loss and `SGD` optimizer
model.compile(
    loss=tf.keras.losses.MSE,
    optimizer=tf.keras.optimizers.SGD(),
)
model.fit(dataset, epochs=100)
This is failing with the following shape-related error in the optimizer:
ValueError: Shapes must be equal rank, but are 1 and 2 for '{{node SGD/SGD/update/ResourceApplyGradientDescent}} = ResourceApplyGradientDescent[T=DT_FLOAT, use_locking=true](fp8_test/test_layer_1/ReadVariableOp/resource, SGD/Identity, SGD/IdentityN)' with input shapes: [], [], [100000,1].
I've been staring at the docs for a while and I'm a bit stumped as to why this isn't working; I would really appreciate any input on how to fix this toy example.
Thanks in advance.
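For reference, tf.custom_gradient expects grad to return one gradient per input of scaler, each built from upstream and matching the corresponding input's shape. A minimal, untested sketch of a shape-consistent grad (my assumption, with the batch axis of x reduced away so the gradient matches the [1,]-shaped weight) might look like:

@tf.custom_gradient
def scaler(x, s):
    def grad(upstream):
        dy_dx = upstream * s                         # same shape as x
        dy_ds = tf.reduce_sum(upstream * x, axis=0)  # reduce batch axis to match s's shape [1,]
        return dy_dx, dy_ds
    return x * s, grad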
I have a two-channel, image-like file from which I am cutting patches as training/validation datasets for a convolutional autoencoder. I am using a custom TensorFlow data generator to use different data for each batch and epoch.
Here is my CustomDataGenerator class:
class CustomDataGenerator(tf.keras.utils.Sequence):
    def __init__(self, file, sample_size, batch_size=32, width=28, height=28, resolution=(28, 28)):
        'Initialization'
        self.sample_size = sample_size
        self.batch_size = batch_size
        self.resolution = resolution
        self.width = width
        self.height = height

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(self.sample_size / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        batch = []
        for i in range(self.batch_size):
            ....
        x = np.asarray(batch)
        x = tf.transpose(x, [0, 2, 3, 1])
        return x, x
and the training code:

...
train_gen = data_generator.CustomDataGenerator(file=file, sample_size=10000)
val_gen = data_generator.CustomDataGenerator(file=file, sample_size=2000)
history = autoencoder.fit(train_gen, epochs=100, validation_data=val_gen)
...

When I run the code, it throws:

ValueError: Failed to find data adapter that can handle input: <class 'data_generator.CustomDataGenerator'>, <class 'NoneType'>

at the model.fit line during training.
tensorflow==2.5.0, keras==2.4.3
Your __getitem__ method must return (X, Y) pairs. Why are you returning X, X instead?
The train_gen is then passed to model.fit() to train; the error is due to the fact that you are sending X as the Y argument to model.fit().
PS: you can use on_epoch_end. This method is called by the fit method at the end of every epoch.
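For example, a minimal sketch of an on_epoch_end that reshuffles sample indices between epochs (the indexes attribute here is hypothetical, not part of the class above):

def on_epoch_end(self):
    'Called by fit at the end of every epoch'
    self.indexes = np.arange(self.sample_size)
    np.random.shuffle(self.indexes)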
I am currently using a tf.keras.utils.Sequence object to generate image batches for a CNN. I am using Tensorflow 2.2 and the Model.fit method for the model. When I fit the model, the following warning is thrown in each epoch when I set use_multiprocessing=True in tf.keras.model.fit(...):
WARNING:tensorflow:multiprocessing can interact badly with TensorFlow,
causing nondeterministic deadlocks. For high performance data pipelines tf.data is recommended
The model is optimizing just fine, as expected from the docs and the fact that I am using a Sequence-based generator. But if use_multiprocessing is going to be deprecated in favor of tf.data objects, I would like to be using the most up-to-date input pipeline. I currently use the following tf.keras.utils.Sequence-based generator, inspired by this article on good practices for partitioning large datasets:
https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, labels, data_dir, batch_size=32, dim=(128,128), n_channels=1,
                 n_classes=2, shuffle=True, **augmentation_kwargs):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.labels = labels
        self.list_IDs = list_IDs
        self.data_dir = data_dir
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()
        self.augmentor = keras.preprocessing.image.ImageDataGenerator(**augmentation_kwargs)

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(list_IDs_temp)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim))
        y = np.empty((self.batch_size), dtype=int)

        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            # Store sample
            X[i,] = np.load(self.data_dir + '/{}_stars.npy'.format(ID))

            # Store class
            y[i] = self.labels[ID]

        # Reshape and apply augmentation to sample
        X, y = self.augmentor.flow(X.reshape(self.batch_size, *self.dim, 1), y=y,
                                   shuffle=False, batch_size=self.batch_size)[0]

        return X, y
All data from all classes is in the data_dir directory and are stored as individual .npy files. The IDs come from a list of strings. The class labels are taken from a dictionary whose keys are the IDs -- as in the article.
I really like the intuition of the Sequence generator set-up. I can also easily generate random batches to check that it is behaving as I would expect. But how can I reproduce this set-up with tf.data? How do I reproduce the multiprocessing batch generation of a Sequence generator with the interleave and prefetch methods of tf.data.Dataset? And/or can I simply ingest this Sequence-based generator with the tf.data.Dataset.from_generator() method?
Many thanks in advance.
It may be too late to answer, but this is what I did and it works fine for me.
1- My class was like this:
import numpy as np
import librosa
import tensorflow as tf
from tensorflow.keras.utils import Sequence, to_categorical

class DataGen(Sequence):
    def __init__(self, df, sr=8000, seconds=3, batch_size=16, shuffle=True):
        self.files = np.array(df.filepath)
        self.label = np.array(df.label)
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.sr = sr
        self.seconds = seconds
        self.dim = self.sr * self.seconds
        self.on_epoch_end()

    def __len__(self):
        return len(self.label) // self.batch_size

    def __getitem__(self, x):
        # take the x-th batch of (shuffled) indices
        indexs = self.indexs[np.arange(x * self.batch_size, (x + 1) * self.batch_size)]
        return self.__getBatch__(indexs)

    def __getBatch__(self, indexs):
        X, y = [], []
        for i in indexs:
            wav = self.__loadFile__(self.files[i])
            X.append(librosa.feature.mfcc(wav, self.sr).T)
            y.append(self.label[i])
        return tf.convert_to_tensor(X), to_categorical(y, num_classes=2)

    def __loadFile__(self, file):
        y, sr = librosa.load(file, sr=8000, mono=True)
        if len(y) > self.dim:
            return y[:self.dim]
        return np.pad(y, (0, self.dim - len(y)), 'constant', constant_values=0)

    def on_epoch_end(self):
        self.indexs = np.arange(len(self.label))
        if self.shuffle:
            np.random.shuffle(self.indexs)
2- Then I changed it to a generator function like the following:
def gen(sr=8000, seconds=3, batch_size=16, shuffle=True):
    # df is expected to be available from the enclosing scope
    dim = sr * seconds

    def loadFile(file):
        wav, _ = librosa.load(file, sr=sr, mono=True)
        if len(wav) > dim:
            return wav[:dim]
        return np.pad(wav, (0, dim - len(wav)), 'constant', constant_values=0)

    while True:
        indexs = np.arange(len(df))
        if shuffle:
            np.random.shuffle(indexs)
        for x in range(len(df) // batch_size):
            X, y = [], []
            for i in indexs[np.arange(x * batch_size, (x + 1) * batch_size)]:
                X.append(librosa.feature.mfcc(loadFile(df.filepath[i]), sr).T)
                y.append(df.label[i])
            yield tf.convert_to_tensor(X), to_categorical(y, num_classes=2)
3- And it works fine:

dataset = tf.data.Dataset.from_generator(gen, (tf.dtypes.float32, tf.dtypes.int32))
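As a side note (my assumption, based on newer TF releases): recent versions of from_generator prefer an output_signature over the positional dtype tuple, for example:

dataset = tf.data.Dataset.from_generator(
    gen,
    output_signature=(
        tf.TensorSpec(shape=(None, None, 20), dtype=tf.float32),  # MFCC batches; dims are a guess
        tf.TensorSpec(shape=(None, 2), dtype=tf.int32)))          # one-hot labels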
Here's another method that I use with TensorFlow, and it works fine:
class DataGen():
    def __init__(self, df, batch_size=32, shuffle=True):
        self.data = np.array(df)
        self.indexs = np.arange(self.data.shape[0])
        if shuffle:
            np.random.shuffle(self.indexs)
        self.batch_size = batch_size

    def __len__(self):
        return self.data.shape[0] // self.batch_size

    def get_item(self, x):
        # data preprocessing
        data, label = self.data[x]
        return data, label

    def __call__(self):
        for i in self.indexs:
            yield self.get_item(i)
train_gen = DataGen(train_df)
types = (tf.float32, tf.int32)
shapes = ((1, 500, 201), (n_classes,))  # note the trailing comma: (n_classes) would just be an int
batch_size = 32

train_data = tf.data.Dataset.from_generator(train_gen, output_types=types, output_shapes=shapes)
train_data = train_data.batch(batch_size)

# test
X, y = next(iter(train_data))
print(X.shape, y.shape)
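To recover some of the parallelism that Sequence's use_multiprocessing gave you, my understanding is that tf.data can overlap input preparation with training via prefetch (and num_parallel_calls on any map step), for example:

train_data = train_data.prefetch(tf.data.experimental.AUTOTUNE)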
I am using the tf.estimator API and I have the following model_fn function:
def model_fn(features, labels, mode, params):
    labels = tf.reshape(labels, (-1, 1))

    model = word2vec.create_model(features, params)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=model)

    loss = sampled_softmax_loss.create_loss(model['softmax_w'],
                                            model['softmax_b'],
                                            model['relu_layer_1'],
                                            labels,
                                            params['softmax_sample'],
                                            params['vocabulary_size'])
    cost = tf.reduce_mean(loss)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=cost)

    optimizer = adam_optimizer.create_optimizer(params['learning_rate'])
    train_operation = optimizer.minimize(cost)

    if mode == tf.estimator.ModeKeys.TRAIN:
        return tf.estimator.EstimatorSpec(mode=mode, loss=cost, train_op=train_operation)

    raise RuntimeError('Not a valid Mode value')
The word2vec.create_model function is given below. The function returns a Python dictionary with the interesting nodes of the network (e.g., the embedding matrix, the softmax weight and bias for training, etc.).
def create_model(features, hyper_params):
    model = {}
    vocabulary_size = hyper_params['vocabulary_size']
    hidden_size = hyper_params['hidden_size']
    feature_columns = hyper_params['feature_columns']

    with tf.variable_scope('word2vec'):
        # Creating the Embedding layer
        net = tf.feature_column.input_layer(features, feature_columns)

        # Creating the hidden layer
        net = dense_layer.create_layer(model, net, hidden_size)

        # Creating the SoftMax weight and bias variables to use in the sampled loss function
        softmax_w = tf.Variable(tf.truncated_normal((vocabulary_size, hidden_size), stddev=0.1), name='softmax_weights')
        softmax_b = tf.Variable(tf.zeros(vocabulary_size), name='softmax_bias')

        model['softmax_w'] = softmax_w
        model['softmax_b'] = softmax_b

    return model
Last but not least, here is my main function, which I run via the tf.app.run(main) command:
def main():
    path = os.path.join('data', 'data.csv')
    (train_x, train_y), (test_x, test_y) = prepare_data.load_data(path, path, columns, columns[-1])

    vocabulary_size = len(train_x[columns[0]].unique())

    feature_columns = []
    for key in train_x.keys():
        item_id = tf.feature_column.categorical_column_with_identity(key=key, num_buckets=vocabulary_size)
        feature_columns.append(tf.feature_column.embedding_column(item_id, 512))

    classifier = tf.estimator.Estimator(
        model_fn=model_fn,
        params={
            'feature_columns': feature_columns,
            'vocabulary_size': vocabulary_size,
            'hidden_size': 256,
            'learning_rate': 0.001,
            'softmax_sample': 100,
        })

    print('Training the classifier...')
    classifier.train(input_fn=lambda: prepare_data.train_input_fn(train_x, train_y, 128), steps=2)

    print('Evaluating on test dataset...')
    eval_result = classifier.evaluate(input_fn=lambda: prepare_data.eval_input_fn(test_x, test_y, 128))

    print('Printing results...')
    print(eval_result)
When I run this, I get a ValueError: GraphDef cannot be larger than 2GB error. Why is that? What am I doing wrong?
Below is my train_input_fn:
def train_input_fn(features, labels, batch_size):
    def gen():
        for f, l in zip(features, labels):
            yield f, l

    ds = tf.data.Dataset.from_generator(gen, (tf.int64, tf.int64), (tf.TensorShape([None]), tf.TensorShape([None])))
    ds = ds.repeat().batch(batch_size)
    feature, label = ds.make_one_shot_iterator().get_next()
    return {"Input": feature}, label
The dataset is a simple CSV like the one below:

   Input  Label
0  12600    838
1  12600   4558
2  12600    838
3  12600   4558
4    838  12600
Dataset.from_tensor_slices adds the whole dataset to the computational graph (see details), so it is better to use Dataset.from_generator. I have shown an example of how to do this using MNIST: How to load MNIST via TensorFlow (including download)?
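To illustrate the difference (a minimal sketch with made-up data, not from the original post): from_tensor_slices bakes the arrays into the GraphDef as constants, while from_generator keeps them out of the graph and feeds them at run time:

import numpy as np
import tensorflow as tf

features = np.random.rand(10**6, 8).astype(np.float32)  # hypothetical ~32 MB array

# Embeds the whole array into the graph as a constant:
ds_slices = tf.data.Dataset.from_tensor_slices(features)

# Keeps the array out of the graph; only the generator plumbing is serialized:
ds_gen = tf.data.Dataset.from_generator(
    lambda: iter(features), tf.float32, tf.TensorShape([8]))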
I'm using canned estimators and am struggling with poor predict performance, so I'm trying to use tf.contrib.predictor to improve my inference performance. I've made this minimalistic example to reproduce my problem:
import tensorflow as tf
from tensorflow.contrib import predictor

def serving_input_fn():
    x = tf.placeholder(dtype=tf.string, shape=[1], name='x')
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

input_feature_column = tf.feature_column.numeric_column('x', shape=[1])
estimator = tf.estimator.DNNRegressor(
    feature_columns=[input_feature_column],
    hidden_units=[10, 20, 10],
    model_dir="model_dir\\predictor-test")

estimator_predictor = predictor.from_estimator(estimator, serving_input_fn)

estimator_predictor({"inputs": ["1.0"]})
This yields the following exception:
UnimplementedError (see above for traceback): Cast string to float is not supported
[[Node: dnn/input_from_feature_columns/input_layer/x/ToFloat = Cast[DstT=DT_FLOAT, SrcT=DT_STRING, _device="/job:localhost/replica:0/task:0/device:CPU:0"](dnn/input_from_feature_columns/input_layer/x/ExpandDims)]]
I've tried using tf.estimator.export.TensorServingInputReceiver instead of ServingInputReceiver in my serving_input_fn(), so that I can feed my model with a numerical tensor, which is what I want:
def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[1], name='x')
    return tf.estimator.export.TensorServingInputReceiver(x, x)
but then I get the following exception in my predictor.from_estimator() call:
ValueError: features should be a dictionary of Tensors. Given type: <class 'tensorflow.python.framework.ops.Tensor'>
Any ideas?
My understanding of all of this is not really solid, but I got it working, and given the size of the community, I'll try to share what I did.
First, I'm running tensorflow 1.5 binaries with this patch applied manually.
The exact code I'm running is this:
def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[3500], name='x')
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir="{}/model_dir_{}/model.ckpt-103712".format(script_dir, 3))

estimator_predictor = tf.contrib.predictor.from_estimator(
    estimator, serving_input_fn)

p = estimator_predictor(
    {"x": np.array(sample.normalized.input_data)})
My case is a bit different from your example because I'm using a custom Estimator, but in your case, I guess you should try something like this:
def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[1], name='x')
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

estimator = ...

estimator_predictor = tf.contrib.predictor.from_estimator(
    estimator, serving_input_fn)

estimator_predictor({"x": [1.0]})
The error is in the following line:

estimator_predictor({"inputs": ["1.0"]})

Please take 1.0 out of the quotes; currently it's a string.
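That is, something like:

estimator_predictor({"inputs": [1.0]})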
After having worked on this for a couple of days, I want to share what I have done. The following code is also available from https://github.com/dage/tensorflow-estimator-predictor-example
TL;DR: predictor works best with custom estimators and the performance increase is massive.
import tensorflow as tf
import numpy as np
import datetime
import time

FEATURES_RANK = 3  # The number of inputs
LABELS_RANK = 2    # The number of outputs

# Returns a numpy array of rank LABELS_RANK based on the features argument.
# Can be used when creating a training dataset.
def features_to_labels(features):
    sum_column = features.sum(1).reshape(features.shape[0], 1)
    labels = np.hstack([sum_column*i for i in range(1, LABELS_RANK+1)])
    return labels

def serving_input_fn():
    x = tf.placeholder(dtype=tf.float32, shape=[None, FEATURES_RANK], name='x')  # match dtype in input_fn
    inputs = {'x': x}
    return tf.estimator.export.ServingInputReceiver(inputs, inputs)

def model_fn(features, labels, mode):
    net = features["x"]  # input
    for units in [4, 8, 4]:  # hidden units
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
        net = tf.layers.dropout(net, rate=0.1)
    output = tf.layers.dense(net, LABELS_RANK, activation=None)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=output, export_outputs={"out": tf.estimator.export.PredictOutput(output)})

    loss = tf.losses.mean_squared_error(labels, output)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

# expecting a numpy array of shape (1, FEATURES_RANK) for the constant_feature argument
def input_fn(num_samples, constant_feature=None, is_infinite=True):
    feature_values = np.full((num_samples, FEATURES_RANK), constant_feature) if isinstance(constant_feature, np.ndarray) else np.random.rand(num_samples, FEATURES_RANK)
    feature_values = np.float32(feature_values)  # match dtype in serving_input_fn
    labels = features_to_labels(feature_values)
    dataset = tf.data.Dataset.from_tensors(({"x": feature_values}, labels))
    if is_infinite:
        dataset = dataset.repeat()
    return dataset.make_one_shot_iterator().get_next()

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir="model_dir\\estimator-predictor-test-{date:%Y-%m-%d %H.%M.%S}".format(date=datetime.datetime.now()))

train = estimator.train(input_fn=lambda: input_fn(50), steps=500)
evaluate = estimator.evaluate(input_fn=lambda: input_fn(20), steps=1)

predictor = tf.contrib.predictor.from_estimator(estimator, serving_input_fn)

consistency_check_features = np.random.rand(1, FEATURES_RANK)
consistency_check_labels = features_to_labels(consistency_check_features)

num_calls_predictor = 100
predictor_input = {"x": consistency_check_features}
start_time_predictor = time.clock()
for i in range(num_calls_predictor):
    predictor_prediction = predictor(predictor_input)
delta_time_predictor = 1./num_calls_predictor*(time.clock() - start_time_predictor)

num_calls_estimator_predict = 10
estimator_input = lambda: input_fn(1, consistency_check_features, False)
start_time_estimator_predict = time.clock()
for i in range(num_calls_estimator_predict):
    estimator_prediction = list(estimator.predict(input_fn=estimator_input))
delta_time_estimator = 1./num_calls_estimator_predict*(time.clock() - start_time_estimator_predict)

print("{} --> {}\n  predictor={}\n  estimator={}.\n".format(consistency_check_features, consistency_check_labels, predictor_prediction, estimator_prediction))
print("Time used per estimator.predict() call: {:.5f}s, predictor(): {:.5f}s ==> predictor is {:.0f}x faster!".format(delta_time_estimator, delta_time_predictor, delta_time_estimator/delta_time_predictor))
On my laptop I get the following results:
[[0.55424854 0.98057611 0.98604857]] --> [[2.52087322 5.04174644]]
predictor={'output': array([[2.5221248, 5.049496 ]], dtype=float32)}
estimator=[array([2.5221248, 5.049496 ], dtype=float32)].
Time used per estimator.predict() call: 0.30071s, predictor(): 0.00057s ==> predictor is 530x faster!
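For what it's worth, my understanding of why the gap is so large: estimator.predict() rebuilds the graph and reloads the checkpoint on every call, whereas the predictor builds the graph and session once and reuses them across calls.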