Barriers & prefetch error when using batch_sequences_with_states - TensorFlow

I encounter an error related to barriers when using tf.contrib.training.batch_sequences_with_states.
I write handmade data to TFRecords format; it contains 100 examples, each a variable-length sequence of 50-dimensional feature frames.
import random
import tensorflow as tf

test_list = list()
for i in range(100):
    l = [[], []]
    llen = int(random.uniform(100, 500))
    for j in range(llen):
        l[0].append([j*0.1]*50)
        l[1].append(j)
    test_list.append(l)

writer = tf.python_io.TFRecordWriter("my_test.sequence.tfrecords")
try:
    for (idx, i) in enumerate(test_list):
        example = tf.train.SequenceExample(
            context=tf.train.Features(
                feature={
                    'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=['list'+str(idx)+'_'])),
                    'length': tf.train.Feature(int64_list=tf.train.Int64List(value=[len(i[0])]))
                }
            ),
            feature_lists=tf.train.FeatureLists(
                feature_list={
                    'feat': tf.train.FeatureList(feature=[tf.train.Feature(float_list=tf.train.FloatList(value=frame)) for frame in i[0]]),
                    'label': tf.train.FeatureList(feature=[tf.train.Feature(int64_list=tf.train.Int64List(value=[frame])) for frame in i[1]])
                }
            )
        )
        writer.write(example.SerializeToString())
finally:
    writer.close()
Then I read and decode the TFRecords dataset, and use tf.contrib.training.batch_sequences_with_states to batch the sequence inputs.
def read_and_decode(filename, num_epochs=None):
    context_features = {
        'id': tf.FixedLenFeature([], dtype=tf.string),
        'length': tf.FixedLenFeature([], dtype=tf.int64)
    }
    sequence_features = {
        "feat": tf.FixedLenSequenceFeature([50], dtype=tf.float32, allow_missing=False),
        "label": tf.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=False)
    }
    filename_queue = tf.train.string_input_producer([filename], num_epochs)
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(filename_queue)
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features=context_features,
        sequence_features=sequence_features)
    return (key, context_parsed, sequence_parsed)
key, context_parsed, sequence_parsed = read_and_decode("my_test.sequence.tfrecords")
batch_size = 16
num_unroll = 8
num_enqueue_threads = 2
hidden_size = 256
cell = tf.contrib.rnn.GRUCell(hidden_size)
initial_state_values = tf.zeros(cell.state_size, dtype=tf.float32)
initial_states = {"gru": initial_state_values}
batch = tf.contrib.training.batch_sequences_with_states(
    input_key=context_parsed['id'],
    input_sequences=sequence_parsed,
    input_context=context_parsed,
    initial_states=initial_states,
    num_unroll=num_unroll,
    batch_size=batch_size,
    input_length=tf.cast(context_parsed["length"], tf.int32),
    pad=True,
    num_threads=num_enqueue_threads,
    capacity=1000 + batch_size * num_enqueue_threads * 2,
    make_keys_unique=False,
    allow_small_batch=True,
    name='batch_sequence')
k = batch.key
nk = batch.next_key
inputs = batch.sequences['feat']
labels = batch.sequences['label']
wid = batch.context['id']
inputs_by_time = tf.split(inputs, num_unroll, 1)
inputs_by_time = [tf.squeeze(elem, squeeze_dims=1) for elem in inputs_by_time]
assert len(inputs_by_time) == num_unroll
outputs, state = tf.contrib.rnn.static_state_saving_rnn(
    cell,
    inputs_by_time,
    sequence_length=batch.context["length"],
    state_saver=batch,
    state_name='gru')
with tf.device('/cpu:0'):
    sess = tf.Session()
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    fetches = [k, nk, wid]
    feed_dict = {}
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord, daemon=False)
    try:
        while True:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            k, nk, wid = sess.run(fetches)
            print('key: %s' % k)
            print('next key: %s' % nk)
            print('id: %s' % wid)
    except tf.errors.OutOfRangeError:
        print("done training")
    finally:
        print('request stop ............')
        coord.request_stop()
        coord.join(threads)
        sess.close()
But I get an error (a screenshot of the trace was attached).
The error message says the barrier attempted to insert an already-existing key. It seems the graph cannot prefetch the second time step of a sequence. Maybe the error comes from prefetch_op?
If I set make_keys_unique=True, the batch still cannot reach the second time step (second screenshot attached).
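One workaround worth experimenting with (a sketch, not a confirmed fix): batch_sequences_with_states appears to require a distinct input_key for every sequence it enqueues, and the key returned by TFRecordReader.read has the form filename:record_number, which is unique per record. Joining it onto the context id would therefore give every enqueued sequence a distinct key:

# Sketch: derive a per-record unique key from the context id and the reader key.
# `key` and `context_parsed` are the values returned by read_and_decode() above.
unique_key = tf.string_join([context_parsed['id'], key], separator=':')

This unique_key could then be passed as input_key in place of context_parsed['id'].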

Related

AttributeError: 'list' object has no attribute 'cuda'

I am trying to develop a convolutional deep learning network for face recognition, and when I run it I get 'list' object has no attribute 'cuda'. I am not sure what went wrong; can anyone check? The first code block below trains the whole model, and the one after it loads the data.
if __name__ == '__main__':
    #set_trace()
    args = edict({
        'operation': 'train',
        'feature_file': None,
        'result_sample_path': None,
        'gpu': 'GPU',
        'path_image_query': None,
        'query_label': 'Query label',
        'dataset': None,
        'specific_dataset_folder_name': 'lfw',
        'img_extension': 'jpg',
        'preprocessing_method': 'sphereface',
        'model_name': 'mobiface',
        'batch_size': 3,
        'image_query': '/content/drive/My Drive/recfaces13/recfaces/datasets/LFW',
        'train': True})

    # selecting the size of the crop based on the network
    if args.model_name == 'mobilefacenet' or args.model_name == 'sphereface':
        crop_size = (96, 112)
    elif args.model_name == 'mobiface' or args.model_name == 'shufflefacenet':
        crop_size = (112, 112)
    elif args.model_name == 'openface':
        crop_size = (96, 96)
    elif args.model_name == 'facenet':
        crop_size = (160, 160)
    else:
        raise NotImplementedError("Model " + args.model_name + " not implemented")

    if args.dataset is not None:
        # process whole dataset
        assert args.specific_dataset_folder_name is not None, 'To process a dataset, ' \
            'the flag --specific_dataset_folder_name is required.'
        process_dataset(args.operation, args.model_name, args.batch_size,
                        args.dataset, args.specific_dataset_folder_name,
                        args.img_extension, args.preprocessing_method, crop_size,
                        args.result_sample_path, args.feature_file)
    #elif args.image_query is not None:
    #    # process unique image
    #    dataset = ImageDataLoader(args.image_query, args.preprocessing_method,
    #                              crop_size, args.operation == 'extract_features')
    #    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2, drop_last=False)
    #    features = None
    elif args.operation == 'train':
        ##########set_trace()
        net = load_net('mobilefacenet', 'gpu')
        net = net.cuda()
        model_name = args.model_name
        dataset = LFW(args.image_query, args.specific_dataset_folder_name, args.img_extension, args.preprocessing_method, crop_size, args.train)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2, drop_last=False)
        # data_counter_per_class = np.zeros((len(dataloader)))
        # for i in range(len(dataloader)):
        #     path = os.path.join('image_query', dataloader[i])
        #     files = get_files_from_folder(path)
        #     data_counter_per_class[i] = len(files)
        # test_counter = np.round(data_counter_per_class * (1 - train_ratio))
        #dataloader1=dataloader.split(',')
        #train,test=train_test_split(dataloader,test_size=0.2)
        #trainloader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2, drop_last=False)
        #testloader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2, drop_last=False) //create path//
        #create array of data path split that data path and
        features = None
        if args.feature_file is not None and os.path.isfile(args.feature_file):
            features = scipy.io.loadmat(args.feature_file)
        epoch = 2
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
        train_loss = list()
        #set_trace()
        for i, data in enumerate(dataloader):
            inps, labs = data
            inps, labs = inps.cuda(args['device']), labs.cuda(args['device'])
            inps.squeeze_(0)
            labs.squeeze_(0)
            inps = Variable(inps).cuda(args['device'])
            labs = Variable(labs).cuda(args['device'])
            optimizer.zero_grad()
            outs = net(inps)
            soft_outs = F.softmax(outs, dim=1)
            prds = soft_outs.data.max(1)[1]
            loss = criterion(outs, labs)
            loss.backward()
            optimizer.step()
            prds = prds.squeeze_(1).squeeze_(0).cpu().numpy()
            inps_np = inps.detach().squeeze(0).cpu().numpy()
            labs_np = labs.detach().squeeze(0).cpu().numpy()
            train_loss.append(loss.data.item())
            print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (epoch, i + 1, len(train_loader), np.asarray(train_loss).mean()))
Dataloader
class LFW(object):
    def __init__(self, root, specific_folder, img_extension, preprocessing_method=None, crop_size=(96, 112)):
        """
        Dataloader of the LFW dataset.

        root: path to the dataset to be used.
        specific_folder: specific folder inside the same dataset.
        img_extension: extension of the dataset images.
        preprocessing_method: string with the name of the preprocessing method.
        crop_size: retrieval network specific crop size.
        """
        self.preprocessing_method = preprocessing_method
        self.crop_size = crop_size
        self.imgl_list = []
        self.classes = []
        self.people = []
        self.model_align = None

        # read the file with the names and the number of images of each person in the dataset
        with open(os.path.join(root, 'people.txt')) as f:
            people = f.read().splitlines()[1:]

        # get only the people that have more than 20 images
        for p in people:
            p = p.split('\t')
            if len(p) > 1:
                if int(p[1]) >= 20:
                    for num_img in range(1, int(p[1]) + 1):
                        self.imgl_list.append(os.path.join(root, specific_folder, p[0], p[0] + '_' +
                                                           '{:04}'.format(num_img) + '.' + img_extension))
                        self.classes.append(p[0])
                        self.people.append(p[0])

        le = preprocessing.LabelEncoder()
        self.classes = le.fit_transform(self.classes)
        print(len(self.imgl_list), len(self.classes), len(self.people))

    def __getitem__(self, index):
        imgl = imageio.imread(self.imgl_list[index])
        cl = self.classes[index]

        # if image is grayscale, transform into rgb by repeating the image 3 times
        if len(imgl.shape) == 2:
            imgl = np.stack([imgl] * 3, 2)

        imgl, bb = preprocess(imgl, self.preprocessing_method, crop_size=self.crop_size,
                              is_processing_dataset=True, return_only_largest_bb=True, execute_default=True)

        # append image with its reverse
        imglist = [imgl, imgl[:, ::-1, :]]

        # normalization
        for i in range(len(imglist)):
            imglist[i] = (imglist[i] - 127.5) / 128.0
            imglist[i] = imglist[i].transpose(2, 0, 1)
        imgs = [torch.from_numpy(i).float() for i in imglist]

        return imgs, cl, imgl, bb, self.imgl_list[index], self.people[index]

    def __len__(self):
        return len(self.imgl_list)
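One thing worth noting against the reported error: __getitem__ above returns imgs, a Python list of two tensors, so inps in the training loop is a list rather than a tensor, and a list has no .cuda() method. A minimal sketch of moving such a structure to the GPU (a hint about the error message, not a verified fix for this pipeline):

# .cuda() exists on tensors, not on Python lists; move each element instead:
inps = [t.cuda() for t in inps]
# or, if the two views share a shape, stack them into one tensor first:
inps = torch.stack(inps, dim=0).cuda()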

Triplet-Loss using pre-trained network

I am trying to use the triplet-loss technique to fine-tune an EfficientNet network for human Re-ID using Keras. Here is the code I am using:
This is the generator:
class SampleGen(object):
    def __init__(self, file_class_mapping):
        self.file_class_mapping = file_class_mapping
        self.class_to_list_files = defaultdict(list)
        self.list_all_files = list(file_class_mapping.keys())
        self.range_all_files = list(range(len(self.list_all_files)))

        for file, class_ in file_class_mapping.items():
            self.class_to_list_files[class_].append(file)

        self.list_classes = list(set(self.file_class_mapping.values()))
        self.range_list_classes = range(len(self.list_classes))
        self.class_weight = np.array([len(self.class_to_list_files[class_]) for class_ in self.list_classes])
        self.class_weight = self.class_weight / np.sum(self.class_weight)

    def get_sample(self):
        class_idx = np.random.choice(self.range_list_classes, 1, p=self.class_weight)[0]
        examples_class_idx = np.random.choice(range(len(self.class_to_list_files[self.list_classes[class_idx]])), 2)
        positive_example_1, positive_example_2 = \
            self.class_to_list_files[self.list_classes[class_idx]][examples_class_idx[0]], \
            self.class_to_list_files[self.list_classes[class_idx]][examples_class_idx[1]]

        negative_example = None
        while negative_example is None or self.file_class_mapping[negative_example] == \
                self.file_class_mapping[positive_example_1]:
            negative_example_idx = np.random.choice(self.range_all_files, 1)[0]
            negative_example = self.list_all_files[negative_example_idx]
        return positive_example_1, negative_example, positive_example_2


def read_and_resize(filepath):
    im = Image.open(filepath).convert('RGB')
    im = im.resize((image_size, image_size))
    return np.array(im, dtype="float32")


def augment(im_array):
    if np.random.uniform(0, 1) > 0.9:
        im_array = np.fliplr(im_array)
    return im_array


def gen(triplet_gen):
    while True:
        list_positive_examples_1 = []
        list_negative_examples = []
        list_positive_examples_2 = []

        for i in range(batch_size):
            positive_example_1, negative_example, positive_example_2 = triplet_gen.get_sample()
            path_pos1 = join(path_train, positive_example_1)
            path_neg = join(path_train, negative_example)
            path_pos2 = join(path_train, positive_example_2)

            positive_example_1_img = read_and_resize(path_pos1)
            negative_example_img = read_and_resize(path_neg)
            positive_example_2_img = read_and_resize(path_pos2)

            positive_example_1_img = augment(positive_example_1_img)
            negative_example_img = augment(negative_example_img)
            positive_example_2_img = augment(positive_example_2_img)

            list_positive_examples_1.append(positive_example_1_img)
            list_negative_examples.append(negative_example_img)
            list_positive_examples_2.append(positive_example_2_img)

        A = preprocess_input(np.array(list_positive_examples_1))
        B = preprocess_input(np.array(list_positive_examples_2))
        C = preprocess_input(np.array(list_negative_examples))
        label = None

        yield {'anchor_input': A, 'positive_input': B, 'negative_input': C}, label
This is how I create the model:
def get_model():
    base_model = efn.EfficientNetB3(weights='imagenet', include_top=False)
    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.output
    x = Dropout(0.6)(x)
    x = Dense(embedding_dim)(x)
    x = Lambda(lambda x: K.l2_normalize(x, axis=1), name="enc_out")(x)

    embedding_model = Model(base_model.input, x, name="embedding")

    input_shape = (image_size, image_size, 3)
    anchor_input = Input(input_shape, name='anchor_input')
    positive_input = Input(input_shape, name='positive_input')
    negative_input = Input(input_shape, name='negative_input')

    anchor_embedding = embedding_model(anchor_input)
    positive_embedding = embedding_model(positive_input)
    negative_embedding = embedding_model(negative_input)

    inputs = [anchor_input, positive_input, negative_input]
    outputs = [anchor_embedding, positive_embedding, negative_embedding]

    triplet_model = Model(inputs, outputs)
    triplet_model.add_loss(K.mean(triplet_loss(outputs)))

    return embedding_model, triplet_model
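The triplet_loss function itself is not shown in the question. For context, a standard margin-based formulation over the three embeddings might look like the sketch below (the margin alpha and the squared-distance choice are assumptions, not necessarily what was used here):

def triplet_loss(embeddings, alpha=0.2):
    # embeddings = [anchor, positive, negative], each already l2-normalized
    anchor, positive, negative = embeddings
    pos_dist = K.sum(K.square(anchor - positive), axis=-1)
    neg_dist = K.sum(K.square(anchor - negative), axis=-1)
    # hinge: positives should be closer than negatives by at least alpha
    return K.maximum(pos_dist - neg_dist + alpha, 0.0)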
And this is how I'm trying to run the training:
if __name__ == '__main__':
    data = pd.read_csv(path_csv)
    train, test = train_test_split(data, train_size=0.7, random_state=1337)
    file_id_mapping_train = {k: v for k, v in zip(train.Image.values, train.Id.values)}
    file_id_mapping_test = {k: v for k, v in zip(test.Image.values, test.Id.values)}

    gen_tr = gen(SampleGen(file_id_mapping_train))
    gen_te = gen(SampleGen(file_id_mapping_test))

    embedding_model, triplet_model = get_model()

    for i, layer in enumerate(embedding_model.layers):
        print(i, layer.name, layer.trainable)
    for layer in embedding_model.layers[379:]:
        layer.trainable = True
    for layer in embedding_model.layers[:379]:
        layer.trainable = False

    triplet_model.compile(loss=None, optimizer=Adam(0.0001))
    history = triplet_model.fit(x=gen_tr,
                                validation_data=gen_te,
                                epochs=10,
                                verbose=1,
                                steps_per_epoch=200,
                                validation_steps=20,
                                callbacks=create_callbacks())
The CSV contains two columns (Image, Id), and I am generating triplets on the fly using a generator. Layer 379 is the last layer of the network, so I leave only that one trainable. I let it run for some epochs and it doesn't seem to converge; the loss stays around 2.30. Around epoch 20 the loss is even higher than what I started with (see the attached training plot). Is there anything wrong with the way I think about the problem?
Thank you!

Tensorflow : Predict in Recurrent Neural Networks for Drawing Classification tutorial

I used the tutorial code from https://www.tensorflow.org/tutorials/recurrent_quickdraw and everything works fine until I try to make a prediction instead of just evaluating.
I wrote a new input function for prediction, based on the code in create_dataset.py
def predict_input_fn():
    def parse_line(stroke_points):
        """Parse an ndjson line and return ink (as np array) and classname."""
        inkarray = json.loads(stroke_points)
        stroke_lengths = [len(stroke[0]) for stroke in inkarray]
        total_points = sum(stroke_lengths)
        np_ink = np.zeros((total_points, 3), dtype=np.float32)
        current_t = 0
        for stroke in inkarray:
            for i in [0, 1]:
                np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
            current_t += len(stroke[0])
            np_ink[current_t - 1, 2] = 1  # stroke_end
        # Preprocessing.
        # 1. Size normalization.
        lower = np.min(np_ink[:, 0:2], axis=0)
        upper = np.max(np_ink[:, 0:2], axis=0)
        scale = upper - lower
        scale[scale == 0] = 1
        np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
        # 2. Compute deltas.
        np_ink = np_ink[1:, 0:2] - np_ink[0:-1, 0:2]
        np_ink = np_ink[1:, :]
        features = {}
        features["ink"] = tf.train.Feature(float_list=tf.train.FloatList(value=np_ink.flatten()))
        features["shape"] = tf.train.Feature(int64_list=tf.train.Int64List(value=np_ink.shape))
        f = tf.train.Features(feature=features)
        example = tf.train.Example(features=f)
        #t = tf.constant(np_ink)
        return example

    def parse_example(example):
        """Parse a single record which is expected to be a tensorflow.Example."""
        # feature_to_type = {
        #     "ink": tf.VarLenFeature(dtype=tf.float32),
        #     "shape": tf.FixedLenFeature((0, 2), dtype=tf.int64)
        # }
        feature_to_type = {
            "ink": tf.VarLenFeature(dtype=tf.float32),
            "shape": tf.FixedLenFeature([2], dtype=tf.int64)
        }
        example_proto = example.SerializeToString()
        parsed_features = tf.parse_single_example(example_proto, feature_to_type)
        parsed_features["ink"] = tf.sparse_tensor_to_dense(parsed_features["ink"])
        #parsed_features["shape"].set_shape((2))
        return parsed_features

    example = parse_line(FLAGS.predict_input_stroke_data)
    features = parse_example(example)
    dataset = tf.data.Dataset.from_tensor_slices(features)
    # Our inputs are variable length, so pad them.
    dataset = dataset.padded_batch(FLAGS.batch_size, padded_shapes=dataset.output_shapes)
    iterator = dataset.make_one_shot_iterator()
    next_feature_batch = iterator.get_next()
    return next_feature_batch, None  # In prediction, we have no labels
I modified the existing model_fn() function and added the following at the appropriate place:
predictions = tf.argmax(logits, axis=1)
if mode == tf.estimator.ModeKeys.PREDICT:
    preds = {
        "class_index": predictions,
        "probabilities": tf.nn.softmax(logits),
        'logits': logits
    }
    return tf.estimator.EstimatorSpec(mode, predictions=preds)
However, when I call the following code,
if FLAGS.predict_input_stroke_data is not None:
    # prepare_input_tfrecord_for_prediction()
    # predict_results = estimator.predict(input_fn=get_input_fn(
    #     mode=tf.estimator.ModeKeys.PREDICT,
    #     tfrecord_pattern=FLAGS.predict_input_temp_file,
    #     batch_size=FLAGS.batch_size))
    predict_results = estimator.predict(input_fn=predict_input_fn)
    for idx, prediction in enumerate(predict_results):
        type = prediction["class_ids"][0]  # Get the predicted class (index)
        print("Prediction Type: {}\n".format(type))
I get the following error. What is wrong in my code? Could anyone please help me? I have tried quite a few things to get the shape right, but I am unable to. I also tried first writing my stroke data as a TFRecord and then using the existing input_fn to read from that TFRecord, which gives me similar but slightly different errors.
File "/Users/farooq/.virtualenvs/tensor1.0/lib/python3.6/site-packages/tensorflow/python/framework/common_shapes.py", line 627, in call_cpp_shape_fn
require_shape_fn)
File "/Users/farooq/.virtualenvs/tensor1.0/lib/python3.6/site-packages/tensorflow/python/framework/common_shapes.py", line 691, in _call_cpp_shape_fn_impl
raise ValueError(err.message)
ValueError: Shape must be rank 2 but is rank 1 for 'Slice' (op: 'Slice') with input shapes: [?], [2], [2].
I finally solved the problem by taking my input strokes and writing them to disk as a TFRecord. I also had to write the same input strokes batch_size times to the same TFRecord, otherwise I got the shape-mismatch errors. Invoking predict then worked.
The main addition for prediction was the following function
def create_tfrecord_for_prediction(batch_size, stoke_data, tfrecord_file):
    def parse_line(stoke_data):
        """Parse provided stroke data and ink (as np array) and classname."""
        inkarray = json.loads(stoke_data)
        stroke_lengths = [len(stroke[0]) for stroke in inkarray]
        total_points = sum(stroke_lengths)
        np_ink = np.zeros((total_points, 3), dtype=np.float32)
        current_t = 0
        for stroke in inkarray:
            if len(stroke[0]) != len(stroke[1]):
                print("Inconsistent number of x and y coordinates.")
                return None
            for i in [0, 1]:
                np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
            current_t += len(stroke[0])
            np_ink[current_t - 1, 2] = 1  # stroke_end
        # Preprocessing.
        # 1. Size normalization.
        lower = np.min(np_ink[:, 0:2], axis=0)
        upper = np.max(np_ink[:, 0:2], axis=0)
        scale = upper - lower
        scale[scale == 0] = 1
        np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
        # 2. Compute deltas.
        #np_ink = np_ink[1:, 0:2] - np_ink[0:-1, 0:2]
        #np_ink = np_ink[1:, :]
        np_ink[1:, 0:2] -= np_ink[0:-1, 0:2]
        np_ink = np_ink[1:, :]
        features = {}
        features["ink"] = tf.train.Feature(float_list=tf.train.FloatList(value=np_ink.flatten()))
        features["shape"] = tf.train.Feature(int64_list=tf.train.Int64List(value=np_ink.shape))
        f = tf.train.Features(feature=features)
        ex = tf.train.Example(features=f)
        return ex

    if stoke_data is None:
        print("Error: Stroke data cannot be none")
        return

    example = parse_line(stoke_data)

    # Remove the file if it already exists
    if tf.gfile.Exists(tfrecord_file):
        tf.gfile.Remove(tfrecord_file)

    writer = tf.python_io.TFRecordWriter(tfrecord_file)
    for i in range(batch_size):
        writer.write(example.SerializeToString())
    writer.flush()
    writer.close()
Then, in the main function, you just have to invoke estimator.predict(), reusing the same input_fn=get_input_fn(...) argument but pointing it to the temporarily created tfrecord_file.
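For completeness, a sketch of that call, mirroring the commented-out block in the question (FLAGS.predict_input_temp_file is assumed to hold the path of the temporary TFRecord):

create_tfrecord_for_prediction(FLAGS.batch_size, FLAGS.predict_input_stroke_data,
                               FLAGS.predict_input_temp_file)
# Reuse the tutorial's get_input_fn, pointed at the temporary TFRecord.
predict_results = estimator.predict(input_fn=get_input_fn(
    mode=tf.estimator.ModeKeys.PREDICT,
    tfrecord_pattern=FLAGS.predict_input_temp_file,
    batch_size=FLAGS.batch_size))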
Hope this helps

Tensorflow : train on mini batch, fast then slow

I am a beginner in TensorFlow and I am trying to train a model using mini-batches. To do that I created a generator and iterate over it. The problem I encounter is that at the beginning of an epoch, training seems fast (many batches per second), then it slows down (to about 1 batch per second), so I am wondering where I went wrong in my code, but I cannot find the problem.
def prepare_data(filename):
    '''load the file which gives path and label for the data'''
    f = open(filename, 'r')
    data = [line.split() for line in f]
    feat = []
    label = []
    for l in data:
        feat.append(l[0])
        label.append(l[1])
    n_samples = len(feat)
    shuf = list(range(n_samples))
    random.shuffle(shuf)
    count = Counter(label)
    print(count)
    feature = [feat[i] for i in shuf]
    label = np.array(label, dtype=np.int)
    return feature, label[shuf]
def get_specgrams(paths, nsamples=16000):
    '''
    Given a list of paths, return specgrams.
    '''
    # read the wav files
    wavs = [wavfile.read(x)[1] for x in paths]
    # zero pad the shorter samples and cut off the long ones.
    data = []
    for wav in wavs:
        if wav.size < 16000:
            d = np.pad(wav, (nsamples - wav.size, 0), mode='constant')
        else:
            d = wav[0:nsamples]
        data.append(d)
    # get the specgram
    #specgram = [signal.spectrogram(d, nperseg=256, noverlap=128)[2] for d in data]
    #specgram = [s.reshape(129, 124, -1) for s in specgram]
    return np.asarray(data)


def get_specgram(path, nsamples=16000):
    '''
    Given a path, return specgrams.
    '''
    # read the wav file
    wav = wavfile.read(path)[1]
    # zero pad the shorter samples and cut off the long ones.
    if wav.size < 16000:
        d = np.pad(wav, (nsamples - wav.size, 0), mode='constant')
    else:
        d = wav[0:nsamples]
    # get the specgram
    #specgram = [signal.spectrogram(d, nperseg=256, noverlap=128)[2] for d in data]
    #specgram = [s.reshape(129, 124, -1) for s in specgram]
    return d
# multi-class classification binary labels
def one_hot_encode(labels, n_unique_labels=31):
    n_labels = len(labels)
    #print('number of unique labels:', n_unique_labels)
    one_hot_encode = np.zeros((n_labels, n_unique_labels))
    one_hot_encode[np.arange(n_labels), labels] = 1
    return np.array(one_hot_encode, dtype=np.int)
#create_path_file('train/audio/')
def model(tr_features, tr_labels, ts_features, ts_labels):
    # remove gpu device error
    config = tf.ConfigProto(allow_soft_placement=True)

    # parameters
    BATCH_SIZE = 4
    number_loop = math.ceil(len(tr_features)/BATCH_SIZE)
    training_epochs = 10
    n_dim = 16000
    n_classes = 31  #len(np.unique(ts_labels))
    n_hidden_units_one = 280
    n_hidden_units_two = 300
    sd = 1 / np.sqrt(n_dim)
    learning_rate = 0.1

    # get test data
    ts_features, ts_labels = get_data(ts_features, ts_labels)

    # Model
    X = tf.placeholder(tf.float32, [None, n_dim])
    Y = tf.placeholder(tf.float32, [None, n_classes])

    W_1 = tf.Variable(tf.random_normal([n_dim, n_hidden_units_one], mean=0, stddev=sd))
    b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean=0, stddev=sd))
    h_1 = tf.nn.tanh(tf.matmul(X, W_1) + b_1)

    W_2 = tf.Variable(tf.random_normal([n_hidden_units_one, n_hidden_units_two], mean=0, stddev=sd))
    b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean=0, stddev=sd))
    h_2 = tf.nn.sigmoid(tf.matmul(h_1, W_2) + b_2)

    W = tf.Variable(tf.random_normal([n_hidden_units_two, n_classes], mean=0, stddev=sd))
    b = tf.Variable(tf.random_normal([n_classes], mean=0, stddev=sd))
    y_ = tf.nn.softmax(tf.matmul(h_2, W) + b)

    init = tf.initialize_all_variables()

    # function and optimizers
    cost_function = -tf.reduce_sum(Y * tf.log(y_))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
    correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # train loop
    cost_history = np.empty(shape=[1], dtype=float)
    y_true, y_pred = None, None
    with tf.Session(config=config) as sess:
        sess.run(init)
        for epoch in range(training_epochs):
            print(' ## Epoch n°', epoch+1)
            batch = batch_generator(BATCH_SIZE, tr_features, tr_labels)
            acc_total = 0.0
            for cpt, (train_features_batch, train_labels_batch) in enumerate(batch):
                _, cost = sess.run([optimizer, cost_function], feed_dict={X: train_features_batch, Y: train_labels_batch})
                cost_history = np.append(cost_history, cost)
                correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                acc = accuracy.eval(feed_dict={X: train_features_batch, Y: train_labels_batch})
                acc_total = (acc_total*cpt + acc)/(cpt+1)
                print('Train accuracy : ', acc_total, '[', str(cpt+1), '/', str(number_loop), ']', flush=True, end='\r')
            clear_output()
            print('Train accuracy : ', acc_total)
            y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: ts_features})
            y_true = sess.run(tf.argmax(ts_labels, 1))
            print('Test accuracy: ', round(sess.run(accuracy, feed_dict={X: ts_features, Y: ts_labels}), 3))
        fig = plt.figure(figsize=(10, 8))
        plt.plot(cost_history)
        plt.axis([0, training_epochs, 0, np.max(cost_history)])
        plt.show()
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average='micro')
        print("F-Score:", round(f, 3))
def batch_generator(batch_size, feat_path, labels):
    n_sample = len(feat_path)
    ite = math.ceil(n_sample/batch_size)
    for i in range(0, ite):
        if i == ite-1:
            label = one_hot_encode(labels[-batch_size:])
            feat = get_specgrams(feat_path[-batch_size:])
            yield (feat, label)
        else:
            label = one_hot_encode(labels[i*batch_size:i*batch_size+batch_size])
            feat = get_specgrams(feat_path[i*batch_size:i*batch_size+batch_size])
            yield (feat, label)


def get_data(feat_path, labels):
    feat = get_specgrams(feat_path)
    label = one_hot_encode(labels)
    return feat, label


def __main__():
    print('## Load data and shuffle')
    feat_path, labels = prepare_data('data_labelised2.txt')
    idx = int(len(labels)*0.8)
    print("## Create Model")
    model(feat_path[0:idx], labels[0:idx], feat_path[idx+1:], labels[idx+1:])


with tf.device('/gpu:0'):
    __main__()
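One detail that stands out in the train loop (an observation, not a verified diagnosis): the two lines that recreate correct_prediction and accuracy inside the batch loop add new nodes to the TensorFlow graph on every iteration, and a steadily growing graph is a classic cause of training that starts fast and then slows down. Equivalent ops are already built once before the session starts, so a sketch of the inner loop without per-batch graph construction:

for cpt, (train_features_batch, train_labels_batch) in enumerate(batch):
    # Run the pre-built accuracy op in the same step; no new ops are created here.
    _, cost, acc = sess.run([optimizer, cost_function, accuracy],
                            feed_dict={X: train_features_batch, Y: train_labels_batch})
    cost_history = np.append(cost_history, cost)
    acc_total = (acc_total*cpt + acc)/(cpt+1)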

TensorFlow: string_input_producer output string not in order with shuffle = False

I have 2 lists of filenames, like filenames_L = [1a, 2a, 3a, 4a, ...] and filenames_R = [1b, 2b, 3b, 4b, ...], and I use the code below to make 2 queues.
"""queue for left images"""
filenames_L = reader.file_name('stereo_dataset/fly_frames_cleanpass/TRAIN', 'Left', 'png')
png = filenames_L[0].lower().endswith('png') # If first file is a png, assume they all are
filenames_L = tf.convert_to_tensor(filenames_L)
filename_queue_L = tf.train.string_input_producer(filenames_L, shuffle=False, num_epochs=FLAGS.epoch)
reader_L = tf.WholeFileReader()
name_L, img_bytes_L = reader_L.read(filename_queue_L)
image_L = tf.image.decode_png(img_bytes_L, channels=3) if png else tf.image.decode_jpeg(img_bytes_L, channels=3)
processed_image_L = image_preprocessing_fn(image_L, FLAGS.height, FLAGS.width)
processed_images_L = tf.train.batch([processed_image_L], FLAGS.batch_size, dynamic_pad=True)
"""queue for right images"""
filenames_R = reader.file_name('stereo_dataset/fly_frames_cleanpass/TRAIN', 'Right', 'png')
filenames_R = tf.convert_to_tensor(filenames_R)
filename_queue_R = tf.train.string_input_producer(filenames_R, shuffle=False, num_epochs=FLAGS.epoch)
reader_R = tf.WholeFileReader()
name_R, img_bytes_R = reader_R.read(filename_queue_R)
image_R = tf.image.decode_png(img_bytes_R, channels=3) if png else tf.image.decode_jpeg(img_bytes_R, channels=3)
processed_image_R = image_preprocessing_fn(image_R, FLAGS.height, FLAGS.width)
processed_images_R = tf.train.batch([processed_image_R], FLAGS.batch_size, dynamic_pad=True)
And then I use the code below to get their names.
with tf.Session(config=config) as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    start_time = time.time()
    while not coord.should_stop():
        _, loss_t, step, name_Left, name_Right = sess.run([train_op, loss, global_step, name_L, name_R], feed_dict={disparity_map: disparity})
What I get from name_Left and name_Right is (3a, 3b), (5a, 5b), ..., but I expected output like (1a, 1b), (2a, 2b), ...
I had a similar issue using tf.train.batch and tf.train.shuffle_batch.
The problem was in the use of threads (similar to this: https://github.com/tensorflow/tensorflow/issues/410).
I solved it by setting only one thread. When I did that, the order was preserved.
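The answer does not show the change itself. As an illustration (not code from the original answer): tf.train.batch takes a num_threads argument, and pushing both tensors through a single batch op makes each left/right pair dequeue together, so independent queue-runner threads cannot interleave them:

# One batch op for both streams: each enqueue reads one left and one right
# image in the same step, so the pairing is preserved.
processed_images_L, processed_images_R = tf.train.batch(
    [processed_image_L, processed_image_R],
    FLAGS.batch_size, num_threads=1, dynamic_pad=True)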