I am currently using a tf.keras.utils.Sequence object to generate image batches for a CNN. I am using TensorFlow 2.2 and the Model.fit method for the model. When I fit the model, the following warning is thrown in each epoch when I set use_multiprocessing=True in tf.keras.Model.fit(...):
WARNING:tensorflow:multiprocessing can interact badly with TensorFlow,
causing nondeterministic deadlocks. For high performance data pipelines tf.data is recommended
The model is optimizing just fine, as expected from the docs and the fact that I am using a Sequence-based generator. But if use_multiprocessing is going to be deprecated functionality in favor of tf.data objects, I would like to be using the most up-to-date input pipeline. I currently use the following tf.keras.utils.Sequence-based generator, inspired by this article on good practices for partitioning large datasets:
https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, labels, data_dir, batch_size=32, dim=(128,128), n_channels=1,
                 n_classes=2, shuffle=True, **augmentation_kwargs):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.labels = labels
        self.list_IDs = list_IDs
        self.data_dir = data_dir
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()
        self.augmentor = keras.preprocessing.image.ImageDataGenerator(**augmentation_kwargs)

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(list_IDs_temp)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim))
        y = np.empty((self.batch_size), dtype=int)

        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            # Store sample
            X[i,] = np.load(self.data_dir + '/{}_stars.npy'.format(ID))

            # Store class
            y[i] = self.labels[ID]

        # Reshape and apply augmentation to sample
        X, y = self.augmentor.flow(X.reshape(self.batch_size, *self.dim, 1), y=y,
                                   shuffle=False, batch_size=self.batch_size)[0]

        return X, y
All data from all classes are stored as individual .npy files in the data_dir directory. The IDs come from a list of strings. The class labels are taken from a dictionary whose keys are the IDs -- as in the article.
I really like the intuition of the Sequence generator set-up, and I can easily generate random batches to check that it is behaving as I would expect. But how can I reproduce this set-up with tf.data? How do I reproduce the multiprocessing batch generation of a Sequence generator with the interleave and prefetch methods of tf.data.Dataset? And/or can I simply ingest this Sequence-based generator with the tf.data.Dataset.from_generator() method?
Many thanks in advance.
It may be too late to answer, but this is what I did and it works fine for me.
1- My class was like this:
class DataGen(Sequence):
    def __init__(self, df, sr=8000, seconds=3, batch_size=16, shuffle=True):
        self.files = np.array(df.filepath)
        self.label = np.array(df.label)
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.sr = sr
        self.seconds = seconds
        self.dim = self.sr*self.seconds
        self.on_epoch_end()

    def __len__(self):
        return len(self.label)//self.batch_size

    def __getitem__(self, x):
        indexs = self.indexs[np.arange(x*self.batch_size, (x+1)*self.batch_size)]
        return self.__getBatch__(indexs)

    def __getBatch__(self, indexs):
        X, y = [], []
        for i in indexs:
            wav = self.__loadFile__(self.files[i])
            X.append(librosa.feature.mfcc(wav, self.sr).T)
            y.append(self.label[i])
        return tf.convert_to_tensor(X), to_categorical(y, num_classes=2)

    def __loadFile__(self, file):
        y, sr = librosa.load(file, sr=8000, mono=True)
        if len(y) > self.dim:
            return y[:self.dim]
        return np.pad(y, (0, self.dim-len(y)), 'constant', constant_values=0)

    def on_epoch_end(self):
        self.indexs = np.arange(len(self.label))
        if self.shuffle:
            np.random.shuffle(self.indexs)
2- Then I changed it to a function like the following:
def gen(sr=8000, seconds=3, batch_size=16, shuffle=True):
    dim = sr*seconds

    def loadFile(file):
        wav, _ = librosa.load(file, sr=sr, mono=True)
        if len(wav) > dim:
            return wav[:dim]
        return np.pad(wav, (0, dim-len(wav)), 'constant', constant_values=0)

    while True:
        indexs = np.arange(len(df))
        if shuffle:
            np.random.shuffle(indexs)
        for x in range(len(df)//batch_size):
            X, y = [], []
            for i in indexs[np.arange(x*batch_size, (x+1)*batch_size)]:
                X.append(librosa.feature.mfcc(loadFile(df.filepath[i]), sr).T)
                y.append(df.label[i])
            yield tf.convert_to_tensor(X), to_categorical(y, num_classes=2)
3- And it works fine:
dataset = tf.data.Dataset.from_generator(gen, (tf.dtypes.float32, tf.dtypes.int32))
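If you also want pipeline-level prefetching and control over how long an epoch is (the gen above loops forever), a minimal sketch could look like the following. Note that model, df, the batch size of 16 and the epochs value are assumptions on my side, not part of the code above:
# Sketch only: prefetch the next batch while the model trains on the current one.
# In TF 2.2 the constant lives under tf.data.experimental; newer releases also
# expose it as tf.data.AUTOTUNE.
dataset = tf.data.Dataset.from_generator(
    gen, (tf.dtypes.float32, tf.dtypes.int32)
).prefetch(tf.data.experimental.AUTOTUNE)

# gen yields batches forever, so tell fit how many batches make up one epoch.
model.fit(dataset, steps_per_epoch=len(df) // 16, epochs=10)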
Here's another method that I use with TensorFlow, and it works fine:
class DataGen():
    def __init__(self, df, batch_size=32, shuffle=True):
        self.data = np.array(df)
        self.indexs = np.arange(self.data.shape[0])
        if shuffle:
            np.random.shuffle(self.indexs)
        self.batch_size = batch_size

    def __len__(self):
        return self.data.shape[0]//self.batch_size

    def get_item(self, x):
        # data preprocessing
        data, label = self.data[x]
        return data, label

    def __call__(self):
        for i in self.indexs:
            yield self.get_item(i)

train_gen = DataGen(train_df)
types = (tf.float32, tf.int32)
shapes = ((1, 500, 201), (n_classes))
batch_size = 32
train_data = Dataset.from_generator(train_gen, output_types=types, output_shapes=shapes)
train_data = train_data.batch(batch_size)

# test
X, y = next(iter(train_data))
print(X.shape, y.shape)
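To feed this into training, a hedged sketch (assuming a compiled tf.keras model named model, which is not part of the snippet above, and a placeholder epochs value) could be:
# Prefetch so the next batch is prepared while the current one is being trained on.
train_data = train_data.prefetch(tf.data.experimental.AUTOTUNE)

model.fit(train_data, epochs=10)
One design note: DataGen shuffles its indices only once, in __init__, so every epoch sees the same order. If you want per-epoch reshuffling you would either recreate the generator each epoch or add a dataset.shuffle(buffer_size) stage before batch().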
Related
I have a peculiar case of slow model training while trying to train using a generator. The reason I need to use a generator is that I have multiple parquet files that cannot be loaded into memory at once. Here is the code snippet without a generator:
d_df = pd.read_parquet("..")
label = pd_df.pop("label")
dataset = tf.data.Dataset.from_tensor_slices((dict(pd_df), label))
# alternate
# dataset = createDataset(bucket,prefix)
def is_test(x, y):
return x % 4 == 0
def is_train(x, y):
return not is_test(x, y)
recover = lambda x, y: y
val_dataset = dataset.enumerate() \
.filter(is_test) \
.map(recover).batch(batch_size)
train_dataset = dataset.enumerate() \
.filter(is_train) \
.map(recover).batch(batch_size)
feature_columns = _create_feature_columns()
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
model = tf.keras.Sequential([
feature_layer,
layers.Dense(1280, activation='relu'),
layers.Dense(512, activation='relu'),
layers.Dense(1280, activation='relu'),
layers.Dense(1)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.MeanSquaredError(),
metrics=['accuracy', 'mean_absolute_error'])
om_model.fit(train_dataset, epochs=10, validation_data=val_dataset, verbose=1)
This runs at about 295 ms per step. Naturally, since it's not possible to load all my data in one go, I wrote the following generator (P.S. I'm new to TF and my generator may be off, but from what I could find online it looks good to me).
def getSplit(original_list, n):
    return [original_list[i:i + n] for i in range(0, len(original_list), n)]

#
# 200 files -> 48 Mb (1 file)
# 15 files in memory at a time
# 5 generators
# 3 files per generator
#
def pandasGenerator(s3files, n=3):
    print(f"Processing: {s3files} to : {tf.get_static_value(s3files)}")
    s3files = tf.get_static_value(s3files)
    s3files = [str(s3file)[2:-1] for s3file in s3files]
    batches = getSplit(s3files, n)
    for batch in batches:
        t = time.process_time()
        print(f"Processing Batch: {batch}")
        panda_ds = pd.concat([pd.read_parquet(s3file) for s3file in batch], ignore_index=True)
        elapsed_time = time.process_time() - t
        print(f"base_read_time: {elapsed_time}")
        for row in panda_ds.itertuples(index=False):
            pan_row = dict(row._asdict())
            labels = pan_row.pop('label')
            yield dict(pan_row), labels
    return

def createDS(s3bucket, s3prefix):
    s3files = getFileLists(bucket=s3bucket, prefix=s3prefix)
    dataset = (tf.data.Dataset.from_tensor_slices(getSplit(s3files, 40))
               .interleave(
                   lambda files: tf.data.Dataset.from_generator(pandasGenerator, output_signature=(
                       {
                       }, tf.TensorSpec(shape=(), dtype=tf.float64)),
                       args=(files, 3)),
                   num_parallel_calls=tf.data.AUTOTUNE
               )).prefetch(tf.data.AUTOTUNE)
    return dataset
When using the generator, the time per step jumps to 2 s.
I'd appreciate any help in improving the generator. Thanks.
I have made a custom data generator that outputs batches of image sequences of shape (batch size, sequence length, image height, image width, channels), along with two labels y1 and y2.
However, I can't seem to retrieve the final (incomplete) batch during training. Any ideas where I am going wrong?
class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, labels, training_set=False, batch_size=32, dim=(224, 224), n_channels=3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.labels = labels
        self.training_set = training_set
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        num_batchs_per_epoch = int(np.floor(len(self.list_IDs) / self.batch_size))
        return num_batchs_per_epoch

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        start = index*self.batch_size
        end = (index+1)*self.batch_size
        indexes = self.indexes[start:end]

        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y1, y2 = self.__data_generation(list_IDs_temp)

        return X, [y1, y2]

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, 3, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, 3, *self.dim, self.n_channels))
        y1 = np.empty((self.batch_size), dtype=float)
        y2 = np.empty((self.batch_size), dtype=int)

        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            sequence = [s for s in ID]
            f0, f1, f2 = [self.load_resize_image(image) for image in sequence]

            # preprocess steps
            f0 = self.preprocess(f0, self.training_set)
            f1 = self.preprocess(f1, self.training_set)
            f2 = self.preprocess(f2, self.training_set)

            triplet = np.concatenate((f0, f1, f2), axis=0)
            X[i,:,:,:,:] = triplet

            ID = tuple(ID)
            y1[i] = self.labels[ID][0]
            y2[i] = self.labels[ID][1]

        return X, y1, y2

    def preprocess(self, img, training_set):
        if self.training_set:
            # apply transformations
            gen = ImageDataGenerator()
            img[0,:,:,:] = gen.apply_transform(x=img[0,:,:,:],
                                               transform_parameters={'theta': random.uniform(-180, 180),
                                                                     'brightness': random.uniform(0.8, 1.2),
                                                                     'flip_horizontal': random.getrandbits(1),
                                                                     'shear': random.uniform(0, 5),
                                                                     'zx': random.uniform(0.9, 1.1),
                                                                     'zy': random.uniform(0.9, 1.1),
                                                                     'flip_vertical': random.getrandbits(1)
                                                                     })
        return img

    def load_resize_image(self, image):
        img = cv2.imread(image)
        img = cv2.resize(img, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_array = np.array(img)
        img_array = np.expand_dims(img_array, 0)
        return img_array
And at training...
history = model.fit(
    training_generator,
    epochs=epochs,
    validation_data=validation_generator,
    callbacks=callbacks
)
The code will always omit the last batch of data, due to this line of code:
int(np.floor(len(self.list_IDs) / self.batch_size))
See the example below:
number_of_samples = 1002
batch_size = 4
num_batches_per_epoch = int(np.floor(number_of_samples / 4))
# num_batches_per_epoch == 250 for number_of_samples in (1000, 1001, 1002, 1003)
The way the generator is written, it will always omit the last batch, which is usually not a problem, since that batch is by definition incomplete.
As you are shuffling at the end of each epoch:
if self.shuffle == True:
    np.random.shuffle(self.indexes)
the few samples not seen in one epoch will almost certainly be seen in later epochs.
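If you do want the final, smaller batch to be used, one common adjustment (a sketch of the idea, not code from the question) is to round up in __len__ and let the last slice simply be shorter:
def __len__(self):
    # Round up so the final, partial batch gets its own index.
    return int(np.ceil(len(self.list_IDs) / self.batch_size))

def __getitem__(self, index):
    indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
    list_IDs_temp = [self.list_IDs[k] for k in indexes]
    # __data_generation would then need to allocate len(list_IDs_temp) rows
    # instead of a fixed self.batch_size, because the last batch is smaller.
    X, y1, y2 = self.__data_generation(list_IDs_temp)
    return X, [y1, y2]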
I am using a Transformer network for machine translation. During training, the GPU runs out of memory with a large dataset; it works fine with small data.
This is the self-attention part; the error comes during the computation of the matrices.
import tensorflow as tf

class SelfAttention(tf.keras.layers.Layer):
    def __init__(self, embed_size, head):
        super(SelfAttention, self).__init__()
        self.head = head
        self.embed_size = embed_size
        self.head_dim = embed_size // head

        assert (self.head_dim * head == embed_size), 'size of head_dim is not matching'

        self.query = tf.keras.layers.Dense(self.head_dim, activation='linear', use_bias=False)
        self.value = tf.keras.layers.Dense(self.head_dim, activation='linear', use_bias=False)
        self.key = tf.keras.layers.Dense(self.head_dim, activation='linear', use_bias=False)
        self.fc_layer = tf.keras.layers.Dense(self.embed_size, activation='linear')

    def call(self, value, key, query, mask):
        # Number of training examples
        N = query.shape[0]

        query_len, value_len, key_len = query.shape[1], value.shape[1], key.shape[1]

        # Reshape according to the number of examples and words
        query = tf.reshape(query, (N, query_len, self.head, self.head_dim))
        value = tf.reshape(value, (N, value_len, self.head, self.head_dim))
        key = tf.reshape(key, (N, key_len, self.head, self.head_dim))

        query = self.query(query)
        value = self.value(value)
        key = self.key(key)

        # energy shape: (N, head, query_len, key_len) try to imagine the shape in mind
        energy = tf.einsum("nqhd, nkhd->nhqk", query, key)

        if mask is not None:
            energy = energy * mask
            energy = tf.where(tf.equal(energy, 0), -1e20, energy)

        attention = tf.keras.activations.softmax(energy, axis=3)
        # attention shape: (N, head, query_len, key_len)
        # value shape: (N, value_len, head, head_dim)
        # output: (N, query_len, head, head_dim)

        output = tf.reshape(tf.einsum("nhql, nlhd->nqhd", attention, value),
                            (N, query_len, self.head*self.head_dim))
        output = tf.keras.activations.linear(output)
        return output
The error is
2021-09-20 11:51:49.615495: I tensorflow/core/common_runtime/bfc_allocator.cc:1036] 1 Chunks of size 35477760 totalling 33.83MiB
2021-09-20 11:51:49.615502: I tensorflow/core/common_runtime/bfc_allocator.cc:1036] 1 Chunks of size 40866304 totalling 38.97MiB
2021-09-20 11:51:49.615509: I tensorflow/core/common_runtime/bfc_allocator.cc:1036] 1 Chunks of size 47409664 totalling 45.21MiB
2021-09-20 11:51:49.615516: I tensorflow/core/common_runtime/bfc_allocator.cc:1036] 1 Chunks of size 47547136 totalling 45.34MiB
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
6860 message = e.message + (" name: " + name if name is not None else "")
6861 # pylint: disable=protected-access
-> 6862 six.raise_from(core._status_to_exception(e.code, message), None)
6863 # pylint: enable=protected-access
6864
/opt/conda/lib/python3.7/site-packages/six.py in raise_from(value, from_value)
ResourceExhaustedError: OOM when allocating tensor with shape[32,334,25335] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:BiasAdd]
What should I do?
You can use a generator to load just a part of the dataset at a time, so the whole dataset never has to sit in memory at once, and with that you will be able to train your model.
Here is an example of a simple generator for image classification that you will need to adapt to your NLP use case:
class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, labels, batch_size=32, dim=(32,32,32), n_channels=1,
                 n_classes=10, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.labels = labels
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(list_IDs_temp)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty((self.batch_size), dtype=int)

        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            # Store sample
            X[i,] = np.load('data/' + ID + '.npy')

            # Store class
            y[i] = self.labels[ID]

        return X, keras.utils.to_categorical(y, num_classes=self.n_classes)
And then pass the generators to the model:
params = {'dim': (32,32,32),
          'batch_size': 64,
          'n_classes': 6,
          'n_channels': 1,
          'shuffle': True}

# Datasets
partition = # IDs
labels = # Labels

# Generators
training_generator = DataGenerator(partition['train'], labels, **params)
validation_generator = DataGenerator(partition['validation'], labels, **params)

model.fit_generator(generator=training_generator,
                    validation_data=validation_generator)
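As a side note, fit_generator is deprecated in TF 2.x, where Model.fit accepts a Sequence directly; a hedged equivalent of the call above (the epochs, workers and multiprocessing settings here are placeholders, not values from the question) would be:
model.fit(training_generator,
          validation_data=validation_generator,
          epochs=10,
          workers=4,
          use_multiprocessing=True)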
I am using TensorFlow 1.7.0 in eager execution mode. I have the model working, but none of the examples that I have found for saving the model work.
This is the code that I am using:
checkpoint_directory ='./JokeWords/'
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tfe.Checkpoint(model=model,optimizer=optimizer) # save as "x"
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
evaluate(model,jokes,2,32)
....
checkpoint.save(file_prefix=checkpoint_prefix)
I have trained the model and use evaluate to check the results when loading after a restart. Each time I get a random result from evaluate, meaning that the model is not loading from the checkpoint, but instead has only random weights.
How do I save the model? It can take days to train one of these.
Edit: Here is the model:
class EagerRNN(tfe.Network):
    def __init__(self, embedding, hidden_dim, num_layers, keep_ratio):
        super(EagerRNN, self).__init__()
        self.keep_ratio = keep_ratio
        self.cells = self._add_cells([
            tf.nn.rnn_cell.BasicLSTMCell(num_units=hidden_dim)
            for _ in range(num_layers)
        ])
        self.backcells = self._add_cells([
            tf.nn.rnn_cell.BasicLSTMCell(num_units=hidden_dim)
            for _ in range(num_layers)
        ])
        self.linear = layers.Dense(embedding.vocab_size, kernel_initializer=tf.random_uniform_initializer(-0.1, 0.1))
        self.backlinear = layers.Dense(embedding.vocab_size, kernel_initializer=tf.random_uniform_initializer(-0.1, 0.1))
        self.attension = layers.Dense(hidden_dim, kernel_initializer=tf.random_uniform_initializer(-0.1, 0.1))

    def call(self, input_seq, seq_lengths, training):
        lengths = [i[0] for i in seq_lengths]
        nRotations = max(lengths)
        batchSize = input_seq.shape[0]

        input_seq2 = tf.unstack(input_seq, num=int(input_seq.shape[1]), axis=1)
        atten = None
        state = self.cells[0].zero_state(batchSize, tf.float32)
        for i in range(0, nRotations):
            for j in range(0, len(self.cells)):
                c = self.cells[j]
                inp = input_seq2[i]
                output, state = c(inp, state)
                #input_seq2[i]=(output)
                if atten == None:
                    atten = self.linear(output)
                else:
                    atten = atten + self.linear(output)
        for i in range(nRotations-1, -1, -1):
            for j in range(0, len(self.backcells)):
                c = self.backcells[j]
                inp = input_seq2[i]
                output, state = c(inp, state)
                #input_seq2[i]=(output)
                atten = atten + self.backlinear(output)
        #input_seq = tf.stack(input_seq2[0:nRotations], axis=1)
        atten = self.attension(atten)
        if training:
            input_seq = tf.nn.dropout(input_seq, self.keep_ratio)
        # Returning a list instead of a single tensor so that the line:
        #   y = self.rnn(y, ...)[0]
        # in PTBModel.call works for both this RNN and CudnnLSTM (which returns a
        # tuple (output, output_states)).
        return input_seq, state, atten

    def _add_cells(self, cells):
        # "Magic" required for keras.Model classes to track all the variables in
        # a list of Layer objects.
        # TODO(ashankar): Figure out API so user code doesn't have to do this.
        for i, c in enumerate(cells):
            setattr(self, "cell-%d" % i, c)
        return cells


class EagerLSTM_Model(tfe.Network):
    """LSTM for word language modeling.

    Model described in:
    (Zaremba, et. al.) Recurrent Neural Network Regularization
    http://arxiv.org/abs/1409.2329

    See also:
    https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb
    """

    def __init__(self,
                 embedding,
                 hidden_dim,
                 num_layers,
                 dropout_ratio,
                 use_cudnn_rnn=True):
        super(EagerLSTM_Model, self).__init__()

        self.keep_ratio = 1 - dropout_ratio
        self.use_cudnn_rnn = use_cudnn_rnn
        self.embedding = embedding

        if self.use_cudnn_rnn:
            self.rnn = cudnn_rnn.CudnnLSTM(
                num_layers, hidden_dim, dropout=dropout_ratio)
        else:
            self.rnn = EagerRNN(embedding, hidden_dim, num_layers, self.keep_ratio)

        self.unrnn = EagerUnRNN(embedding, hidden_dim, num_layers, self.keep_ratio)

    def callRNN(self, input_seq, seq_lengths, training):
        y = self.embedding.callbatchword(input_seq)
        if training:
            y = tf.nn.dropout(y, self.keep_ratio)

        y, state, atten = self.rnn.call(y, seq_lengths, training=training)
        return state, atten

    def callUnRNN(self, state, atten, seq_lengths, training):
        x, state = self.unrnn(state, atten, seq_lengths, training=training)
        #b=tf.reshape(y, self._output_shape)
        #c=self.linear(b)
        return x
tfe.Network is not (easily) Checkpointable and it will soon be deprecated. Prefer to subclass tf.keras.Model instead. So if you change class EagerRNN(tfe.Network) to class EagerRNN(tf.keras.Model) and class EagerLSTM_Model(tfe.Network) to class EagerLSTM_Model(tf.keras.Model), checkpoint.save(file_prefix=checkpoint_prefix) should actually save all your variables and checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory)) should restore them.
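For illustration only, here is a minimal toy sketch of the pattern using a tiny stand-in model (not your EagerLSTM_Model) and the same tfe.Checkpoint API from your question; the directory name and optimizer are placeholders, and exact APIs may differ slightly between 1.x releases:
import os
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

class TinyModel(tf.keras.Model):
    # A tf.keras.Model subclass is trackable, so the checkpoint can find its variables.
    def __init__(self):
        super(TinyModel, self).__init__()
        self.dense = tf.keras.layers.Dense(4)

    def call(self, x):
        return self.dense(x)

model = TinyModel()
optimizer = tf.train.AdamOptimizer()
checkpoint = tfe.Checkpoint(model=model, optimizer=optimizer)

checkpoint_directory = './toy_ckpt/'
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')

# Restore is a no-op on the first run (latest_checkpoint returns None);
# once variables exist, save writes them out and restore brings them back.
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
model(tf.zeros([1, 3]))  # run the model once so its variables are created
checkpoint.save(file_prefix=checkpoint_prefix)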
When using an input function to feed training data to tf.learn, what is the best method in TF 1.0?
I was using learn.dataframe.queues.feeding_functions.enqueue_data in TF 0.12, but having explored tf.train.batch, tf.train.slice_input_producer and tf.FIFOQueue, I am not sure how to rewrite this for TF 1.0.
The code I was using is
def pandas_input_fn(x, y=None, batch_size=128, num_epochs=None):
    def input_fn():
        if y is not None:
            x.loc[:, 'y'] = y

        queue = learn.dataframe.queues.feeding_functions.enqueue_data(
            x, 1000, shuffle=num_epochs is None, num_epochs=num_epochs)

        if num_epochs is None:
            features = queue.dequeue_many(batch_size)
        else:
            features = queue.dequeue_up_to(batch_size)

        features = dict(zip(['index'] + list(x.columns), features))
        if y is not None:
            target = features.pop('y')
            return features, target
        return features
    return input_fn
from https://medium.com/#ilblackdragon/tensorflow-tutorial-part-4-958c29c717a0#.4vwo2ato7
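For comparison, later 1.x releases (roughly 1.3 onwards, so not TF 1.0 itself) ship a ready-made pandas input function under tf.estimator; this is only a possible direction if you can upgrade, not a drop-in answer for 1.0. A minimal sketch, assuming x is a pandas DataFrame of features and y a pandas Series of targets:
import tensorflow as tf

# Builds an input_fn that queues and batches rows from the DataFrame.
input_fn = tf.estimator.inputs.pandas_input_fn(
    x=x, y=y, batch_size=128, num_epochs=None, shuffle=True)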