ValueError: Fetch argument <tf.Tensor 'IteratorGetNext:0' shape=(?, 120, 120, 3) dtype=float32> cannot be interpreted as a Tensor - tensorflow

Here, I am training my network using a dataset stored in a .tfrecord file. The dataset contains images and object poses. Whenever I run this code, I get the error shown at the bottom.
def _parse_image_function(example_proto):
    image_feature_description = {
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'depth': tf.FixedLenFeature([], tf.int64),
        'label': tf.FixedLenFeature([], tf.string),
        'image_raw': tf.FixedLenFeature([], tf.string),
    }
    # Parse the input tf.Example proto using the dictionary above.
    example = tf.parse_single_example(example_proto, image_feature_description)
    height_feature = example['height']  # int64 scalar
    width_feature = example['width']  # int64 scalar
    depth_feature = example['depth']  # int64 scalar
    image_raw_feature = example['image_raw']  # byte string
    label_feature = example['label']  # byte string
    images = tf.parse_tensor(example['image_raw'], out_type=tf.int32)  # restore array from byte string
    images = tf.cast(images, dtype=tf.float32) / 255.0
    images = tf.reshape(images, [120, 120, 3])
    image_label = tf.parse_tensor(label_feature, out_type=tf.float64)  # restore array from byte string
    return images, image_label
def get_batched_dataset(filenames):
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(_parse_image_function)
    dataset = dataset.cache()  # This dataset fits in RAM
    dataset = dataset.repeat()
    dataset = dataset.shuffle(20000)
    dataset = dataset.batch(BATCH_SIZE)  # drop_remainder will be needed on TPU
    # dataset = dataset.prefetch(AUTO)
    return dataset

def get_training_dataset():
    return get_batched_dataset(training_filenames)

def get_validation_dataset():
    return get_batched_dataset(validation_filenames)
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(kernel_size=(3, 3), filters=64, dtype='float32', input_shape=(120, 120, 3)),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.Conv2D(kernel_size=(3, 3), filters=64, use_bias=True),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Conv2D(kernel_size=(3, 3), filters=32, use_bias=True),
    # tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Conv2D(kernel_size=(3, 3), filters=32, use_bias=True),
    # tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Conv2D(kernel_size=(3, 3), filters=16, use_bias=True),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Conv2D(kernel_size=(3, 3), filters=8, use_bias=True),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPooling2D(pool_size=2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, use_bias=True),
    tf.keras.layers.Activation('relu'),
    # tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(12, activation='linear', name='fc')])
model.compile(optimizer='Adam', loss='mse', metrics=['mae', 'mse']) # mean absolute error
#model.summary()
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
history = model.fit_generator(get_training_dataset(), steps_per_epoch=steps_per_epoch, epochs=100,
                              validation_data=get_validation_dataset(), validation_steps=validation_steps,
                              callbacks=[tensorboard_callback])
Whenever I run this code, I am getting this error:
ValueError: Fetch argument <tf.Tensor 'IteratorGetNext:0' shape=(?, 120, 120, 3) dtype=float32> cannot be interpreted as a Tensor. (Tensor Tensor("IteratorGetNext:0", shape=(?, 120, 120, 3), dtype=float32) is not an element of this graph.)
Note that I am using TensorFlow 1.8.

Related

How to use layer output as argument to subsequent layer

I need to add a Cropping2D layer where the left and right crop arguments are determined dynamically by the output of previous layers, i.e., the left_crop and right_crop arguments are not known at code time. However, I seem unable to access the value of a previous tensor in the model. Here is my code:
input1 = Input(name='dirty', shape=(IMG_HEIGHT, None, 1), dtype='float32')
input2 = Input(name='x0', shape=(), dtype='int32')
input3 = Input(name='x1', shape=(), dtype='int32')
# Encoder
conv1 = Conv2D(48, kernel_size=(3, 3), activation='relu', padding='same', name='conv1')(input1)
pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(conv1)
conv2 = Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same', name='conv2')(pool1)
# Decoder
deconv2 = Conv2DTranspose(48, kernel_size=(3, 3), activation='relu', padding='same', name='deconv2')(conv2)
depool1 = UpSampling2D(size=(2, 2), name='depool1')(deconv2)
output1 = Conv2DTranspose(1, kernel_size=(3, 3), activation='relu', padding='same', name='clean')(depool1)
_, _, width, _ = K.int_shape(output1)
left = K.eval(input2)
right = width - K.eval(input3)
output2 = Cropping2D(name='clean_snippet', cropping=((0, 0), (left, right)))(output1)
That produces the following error:
Traceback (most recent call last):
  File "test.py", line 81, in <module>
    left = K.eval(input2)
  File "/Users/garnet/Library/Python/3.8/lib/python/site-packages/keras/backend.py", line 1632, in eval
    return get_value(to_dense(x))
  File "/Users/garnet/Library/Python/3.8/lib/python/site-packages/keras/backend.py", line 4208, in get_value
    return x.numpy()
AttributeError: 'KerasTensor' object has no attribute 'numpy'
I'm using TF 2.10.0 with Keras 2.10.0. I've tried both with and without eager mode enabled. My question is specifically about the four lines after the "HERE'S THE AREA IN QUESTION..." comment in my full code below. How can I access previous layer values to use them as an argument (not the input layer) to Cropping2D()? Any ideas?
For context, here's my entire code:
import tensorflow as tf
import cv2
import random
import os
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D, Cropping2D, UpSampling2D, Input
from tensorflow.keras import losses
SNIPPET_WIDTH = 40
IMG_HEIGHT = 60
def get_data(paths):
    for path in paths:
        clean = cv2.imread(path.decode('utf-8'), cv2.IMREAD_GRAYSCALE)
        h, w = clean.shape
        dirty = cv2.blur(clean, (random.randint(1, 5), random.randint(1, 5)))
        x0 = random.randint(0, w - SNIPPET_WIDTH)
        x1 = x0 + SNIPPET_WIDTH
        y0 = 0
        y1 = h - 1
        clean_snippet = clean[y0:y1, x0:x1]
        dirty[y0:y1, x0:x1] = 0  # masked out region
        dirty = (256. - dirty.astype(np.float32)) / 255.
        dirty = tf.convert_to_tensor(np.expand_dims(dirty, axis=2))
        x0 = tf.convert_to_tensor(x0)
        x1 = tf.convert_to_tensor(x1)
        clean = (256. - clean.astype(np.float32)) / 255.
        clean = tf.convert_to_tensor(np.expand_dims(clean, axis=2))
        clean_snippet = (256. - clean_snippet.astype(np.float32)) / 255.
        clean_snippet = tf.convert_to_tensor(np.expand_dims(clean_snippet, axis=2))
        yield {'dirty': dirty, 'x0': x0, 'x1': x1}, {'clean': clean, 'clean_snippet': clean_snippet}

train_directory = 'data/training/'
files = os.listdir(train_directory)
paths = []
for f in files:
    filename = os.fsdecode(f)
    paths.append(train_directory + filename)

train_ds = tf.data.Dataset.from_generator(get_data, args=[paths], output_signature=(
    {
        'dirty': tf.TensorSpec(shape=(IMG_HEIGHT, None, 1), dtype=tf.float32),
        'x0': tf.TensorSpec(shape=(), dtype=tf.int32),
        'x1': tf.TensorSpec(shape=(), dtype=tf.int32)
    },
    {
        'clean': tf.TensorSpec(shape=(IMG_HEIGHT, None, 1), dtype=tf.float32),
        'clean_snippet': tf.TensorSpec(shape=(IMG_HEIGHT, None, 1), dtype=tf.float32)
    }
))

bucket_sizes = [400, 500, 600, 700, 800]
bucket_batch_sizes = [16, 16, 16, 16, 16, 16]
train_ds = train_ds.bucket_by_sequence_length(element_length_func=lambda x, y: tf.shape(y['clean'])[1],
                                              bucket_boundaries=bucket_sizes, bucket_batch_sizes=bucket_batch_sizes)
input1 = Input(name='dirty', shape=(IMG_HEIGHT, None, 1), dtype='float32')
input2 = Input(name='x0', shape=(), dtype='int32')
input3 = Input(name='x1', shape=(), dtype='int32')
# Encoder
conv1 = Conv2D(48, kernel_size=(3, 3), activation='relu', padding='same', name='conv1')(input1)
pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(conv1)
conv2 = Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same', name='conv2')(pool1)
# Decoder
deconv2 = Conv2DTranspose(48, kernel_size=(3, 3), activation='relu', padding='same', name='deconv2')(conv2)
depool1 = UpSampling2D(size=(2, 2), name='depool1')(deconv2)
output1 = Conv2DTranspose(1, kernel_size=(3, 3), activation='relu', padding='same', name='clean')(depool1)
# HERE'S THE AREA IN QUESTION...
_, _, width, _ = K.int_shape(output1)
left = K.eval(input2)
right = width - K.eval(input3)
output2 = Cropping2D(name='clean_snippet', cropping=((0, 0), (left, right)))(output1)
# ...END AREA IN QUESTION
model = Model(inputs=[input1, input2, input3], outputs=[output1, output2])
optimizer = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
loss_fcns = {'clean': losses.MeanAbsoluteError(), 'clean_snippet': losses.MeanAbsoluteError()}
model.compile(loss=losses.MeanAbsoluteError(), optimizer=optimizer, metrics=['acc'])
model.fit(x=train_ds, y=None, epochs=1000, shuffle=True, verbose=1)
Here's the working solution inspired by @Yaoshiang's comment:
import tensorflow as tf
import cv2
import random
import os
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D, Cropping2D, UpSampling2D, Input, Multiply
from tensorflow.keras import losses
SNIPPET_WIDTH = 40
IMG_HEIGHT = 60
def normalize(img):
    return np.expand_dims((256. - img.astype(np.float32)) / 255., axis=2)

def get_data(paths):
    for path in paths:
        clean = cv2.imread(path.decode('utf-8'), cv2.IMREAD_GRAYSCALE)
        h, w = clean.shape
        dirty = cv2.blur(clean, (random.randint(1, 5), random.randint(1, 5)))
        x0 = random.randint(0, w - SNIPPET_WIDTH)
        x1 = x0 + SNIPPET_WIDTH
        y0 = 0
        y1 = h - 1
        dirty[y0:y1, x0:x1] = 0  # masked out region
        dirty = normalize(dirty)
        clean = normalize(clean)
        mask = np.zeros((h, w, 1), dtype=np.float32)
        mask[:, x0:x1, :] = 1.0
        clean_snippet = clean * mask
        clean = tf.convert_to_tensor(clean)
        dirty = tf.convert_to_tensor(dirty)
        mask = tf.convert_to_tensor(mask)
        clean_snippet = tf.convert_to_tensor(clean_snippet)
        yield {'dirty': dirty, 'mask': mask}, {'clean': clean, 'clean_snippet': clean_snippet}

train_directory = 'data/training/'
files = os.listdir(train_directory)
paths = []
for f in files:
    filename = os.fsdecode(f)
    paths.append(train_directory + filename)

train_ds = tf.data.Dataset.from_generator(get_data, args=[paths], output_signature=(
    {
        'dirty': tf.TensorSpec(shape=(IMG_HEIGHT, None, 1), dtype=tf.float32),
        'mask': tf.TensorSpec(shape=(IMG_HEIGHT, None, 1), dtype=tf.float32)
    },
    {
        'clean': tf.TensorSpec(shape=(IMG_HEIGHT, None, 1), dtype=tf.float32),
        'clean_snippet': tf.TensorSpec(shape=(IMG_HEIGHT, None, 1), dtype=tf.float32)
    }
))

bucket_sizes = [400, 500, 600, 700, 800]
bucket_batch_sizes = [16, 16, 16, 16, 16, 16]
train_ds = train_ds.bucket_by_sequence_length(element_length_func=lambda x, y: tf.shape(y['clean'])[1],
                                              bucket_boundaries=bucket_sizes, bucket_batch_sizes=bucket_batch_sizes)
input1 = Input(name='dirty', shape=(IMG_HEIGHT, None, 1), dtype='float32')
input2 = Input(name='mask', shape=(IMG_HEIGHT, None, 1), dtype='float32')
# Encoder
conv1 = Conv2D(48, kernel_size=(3, 3), activation='relu', padding='same', name='conv1')(input1)
pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(conv1)
conv2 = Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same', name='conv2')(pool1)
# Decoder
deconv2 = Conv2DTranspose(48, kernel_size=(3, 3), activation='relu', padding='same', name='deconv2')(conv2)
depool1 = UpSampling2D(size=(2, 2), name='depool1')(deconv2)
output1 = Conv2DTranspose(1, kernel_size=(3, 3), activation='relu', padding='same', name='clean')(depool1)
output2 = Multiply(name='clean_snippet')([output1, input2])
model = Model(inputs=[input1, input2], outputs=[output1, output2])
optimizer = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
loss_fcns = {'clean': losses.MeanAbsoluteError(), 'clean_snippet': losses.MeanAbsoluteError()}
model.compile(loss=loss_fcns, optimizer=optimizer, metrics=['acc'])
model.fit(x=train_ds, y=None, epochs=1000, shuffle=True, verbose=1)
This is a classic bug that pops up because of graph mode. When you run this code, it's not really executing the Python line by line; TensorFlow introspects the Python code and compiles it to a graph that runs well on a GPU. Some of the things you think you can do in Python, you can't do once it's compiled.
In this case, tensor shapes must be fixed during execution, so you can't have dynamic output shapes during training.
Instead of cropping in the model, I'd just zero out the pixels you would have cropped. And in your dataset of training images, instead of dynamically adjusting the image sizes, adjust them dynamically and then pad with zeros to a fixed image size (and snippet location). The MAE between those zero pixels in the ground truth and the hard-coded zeros will be zero.
And drop the K.eval. You won't need it anymore: you can build masks with input2 and input3 directly using TF ops. Note that TF ops take the full batch, unlike Keras layers, and you can't loop, so you'll need to do it vectorized. You can do it with tf.sequence_mask.
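For example, here is a rough, untested sketch of that masking idea, keeping the original scalar x0/x1 inputs from the question; the Lambda wrapper and the crop_as_mask helper name are just one possible way to wire it in, not the only one:
def crop_as_mask(args):
    image, x0, x1 = args                        # image: (B, H, W, 1); x0, x1: (B,) int32
    width = tf.shape(image)[2]
    # True for columns in [x0, x1), per example in the batch
    keep = tf.sequence_mask(x1, maxlen=width) & ~tf.sequence_mask(x0, maxlen=width)
    keep = tf.cast(keep, image.dtype)[:, tf.newaxis, :, tf.newaxis]   # (B, 1, W, 1), broadcastable
    return image * keep

output2 = tf.keras.layers.Lambda(crop_as_mask, name='clean_snippet')([output1, input2, input3])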

TF2 code 10 times slower than equivalent PyTorch code for a Conv1D network

I've been trying to translate some PyTorch code to TensorFlow 2, but the TF2 code is around 10 times slower. I've tried looking at where this might come from, and as far as I can tell it comes from the tape.gradient call (performance was the same with keras' .fit function). I've tried to use different data loaders, ways of declaring the model, installations, etc... and the results have been consistent.
Any explanation / solution as to why this is happening would be much appreciated.
Here is a minimalist version of the TF2 code:
import time
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
# Generate some fake data
train_labels = np.random.randint(10, size=1000)
train_data = np.random.rand(1000, 120, 18, 1)
train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
train_dataset = train_dataset.batch(256)
# Create a small model
model = tf.keras.Sequential([
    layers.Conv1D(64, kernel_size=7, strides=3, padding="same", activation="relu"),
    layers.Conv1D(64, kernel_size=5, strides=2, padding="same", activation="relu"),
    layers.Conv1D(128, kernel_size=5, strides=2, padding="same", activation="relu"),
    layers.Conv1D(128, kernel_size=3, strides=1, padding="same", activation="relu"),
    layers.Conv1D(128, kernel_size=3, strides=1, padding="same", activation="relu"),
    layers.Conv1D(256, kernel_size=1, strides=1, padding="same", activation="relu"),
    layers.GlobalAveragePooling2D(),
    layers.Flatten(),
    layers.Dense(128, use_bias=True, activation="relu"),
    layers.Dense(32, use_bias=True, activation="relu"),
    layers.Dense(1, activation='sigmoid', use_bias=True),
])
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, decay=5e-4)
@tf.function
def train_step(data_batch, labels_batch):
    with tf.GradientTape() as tape:
        y_pred = model(data_batch)
        loss = tf.keras.losses.MSE(labels_batch, y_pred)
    gradients = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))

step_times = []
for epoch in range(20):
    for data_batch, labels_batch in train_dataset:
        step_start_time = time.perf_counter()
        train_step(data_batch, labels_batch)
        if epoch != 0:
            step_times.append(time.perf_counter() - step_start_time)
print(f"Average training step time: {np.mean(step_times):.3f}s.")
And the PyTorch equivalent:
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Generate some fake data
train_labels = np.random.randint(10, size=1000)
train_data = np.random.rand(1000, 18, 120)
# Create a small model
class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv1d(18, 64, kernel_size=7, stride=3, padding=3)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=5, stride=2, padding=2)
        self.conv3 = nn.Conv1d(64, 128, kernel_size=5, stride=2, padding=2)
        self.conv4 = nn.Conv1d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv5 = nn.Conv1d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv1d(128, 256, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(256, 128)
        self.fc2 = nn.Linear(128, 32)
        self.fc3 = nn.Linear(32, 1)

    def forward(self, inputs):
        x = F.relu(self.conv1(inputs))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = x.mean(2)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = torch.sigmoid(self.fc3(x))
        return x

model = Model()
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=5e-4)
loss_fn = torch.nn.MSELoss()
batch_size = 256
train_steps_per_epoch = train_data.shape[0] // batch_size
step_times = []
for epoch in range(20):
    for step in range(train_steps_per_epoch):
        batch_start, batch_end = step * batch_size, (step + 1) * batch_size
        data_batch = torch.FloatTensor(train_data[batch_start:batch_end]).to(device)
        labels_batch = torch.FloatTensor(train_labels[batch_start:batch_end]).to(device)
        step_start_time = time.perf_counter()
        optimizer.zero_grad()
        y_pred = model(data_batch)
        loss = loss_fn(labels_batch, torch.squeeze(y_pred))
        loss.backward()
        optimizer.step()
        if epoch != 0:
            step_times.append(time.perf_counter() - step_start_time)
print(f"Average training step time: {np.mean(step_times):.3f}s.")
You're using tf.GradientTape correctly, but both your model and your data differ between the two snippets you provided.
Here is TF code that uses the same data and model architecture as your PyTorch model.
import time
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
# Generate some fake data
train_labels = np.random.randint(10, size=1000)
train_data = np.random.rand(1000, 120, 18)
train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
train_dataset = train_dataset.batch(256)
model = tf.keras.Sequential([
    layers.Conv1D(64, kernel_size=7, strides=3, padding="same", activation="relu"),
    layers.Conv1D(64, kernel_size=5, strides=2, padding="same", activation="relu"),
    layers.Conv1D(128, kernel_size=5, strides=2, padding="same", activation="relu"),
    layers.Conv1D(128, kernel_size=3, strides=1, padding="same", activation="relu"),
    layers.Conv1D(128, kernel_size=3, strides=1, padding="same", activation="relu"),
    layers.Conv1D(256, kernel_size=3, strides=1, padding="same", activation="relu"),
    layers.GlobalAveragePooling1D(),
    layers.Dense(128, use_bias=True, activation="relu"),
    layers.Dense(32, use_bias=True, activation="relu"),
    layers.Dense(1, activation='sigmoid', use_bias=True),
])
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, decay=5e-4)

@tf.function
def train_step(data_batch, labels_batch, model):
    with tf.GradientTape() as tape:
        y_pred = model(data_batch, training=True)
        loss = tf.keras.losses.MSE(labels_batch, y_pred)
    gradients = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))

step_times = []
for epoch in range(20):
    for data_batch, labels_batch in train_dataset:
        step_start_time = time.perf_counter()
        train_step(data_batch, labels_batch, model)
        if epoch != 0:
            step_times.append(time.perf_counter() - step_start_time)
print(f"Average training step time: {np.mean(step_times):.3f}s.")
So, in reality, TF is three times faster than PyTorch here: 0.035 s vs. 0.112 s per training step.
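As a quick sanity check (assuming the Keras model above and the PyTorch Model class from the question are both defined in the same session), you can compare trainable parameter counts to confirm the two architectures really match:
# Keras side: build with the input shape used above, then count parameters
model.build(input_shape=(None, 120, 18))
print(model.count_params())

# PyTorch side: sum the sizes of all trainable parameters
pt_model = Model()
print(sum(p.numel() for p in pt_model.parameters() if p.requires_grad))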

Problem training a TensorFlow Estimator with TFRecord files

I'm training a TensorFlow Estimator with TFRecord files, but I can't solve some errors.
My dataset is large, so I had to convert it from npz to TFRecords. The Estimator runs fine with the numpy dataset.
First, my saved TFRecord data has shape (N, 437, 256) and my batch_size is 30. Here's my code.
def parser(serialized_example):
    """Parses a single tf.Example into image and label tensors."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            'train_input_enc/time': tf.FixedLenFeature([], tf.float32),
            'train_input_enc/fft': tf.FixedLenFeature([], tf.float32),
            'train_input_enc/data_raw': tf.FixedLenFeature([], tf.string),
            'train_output_dec/time': tf.FixedLenFeature([], tf.float32),
            'train_output_dec/fft': tf.FixedLenFeature([], tf.float32),
            'train_output_dec/data_raw': tf.FixedLenFeature([], tf.string),
            'train_target_dec/time': tf.FixedLenFeature([], tf.float32),
            'train_target_dec/fft': tf.FixedLenFeature([], tf.float32),
            'train_target_dec/data_raw': tf.FixedLenFeature([], tf.string)
        })
    inp_enc_spec = tf.decode_raw(features['train_input_enc/data_raw'], tf.float32)
    oup_dec_spec = tf.decode_raw(features['train_output_dec/data_raw'], tf.float32)
    trg_dec_spec = tf.decode_raw(features['train_target_dec/data_raw'], tf.float32)
    inp_enc_spectrogram = tf.reshape(inp_enc_spec, [DEFINES.max_sequence_length, DEFINES.embedding_size])
    oup_dec_spectrogram = tf.reshape(oup_dec_spec, [DEFINES.max_sequence_length, DEFINES.embedding_size])
    trg_dec_spectrogram = tf.reshape(trg_dec_spec, [DEFINES.max_sequence_length, DEFINES.embedding_size])
    return inp_enc_spectrogram, oup_dec_spectrogram, trg_dec_spectrogram

def rearrange(input, output, target):
    features = {"input": input, "output": output}
    return features, target

def train_input_fn(train_input_enc, train_output_dec, train_target_dec, batch_size):
    dataset = tf.data.TFRecordDataset([train_input_enc, train_output_dec, train_target_dec])
    print("input dataset", dataset)
    dataset = dataset.map(map_func=parser)
    print("dataset map with parser", dataset)  # <TFRecordDataset shapes: (), types: tf.string>
    dataset = dataset.shuffle(buffer_size=900)
    print("dataset shuffle", dataset)  # <MapDataset shapes: ((437, 256), (437, 256), (437, 256)), types: (tf.float32, tf.float32, tf.float32)>
    dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
    print("dataset batch", dataset)  # <BatchDataset shapes: ((30, 437, 256), (30, 437, 256), (30, 437, 256)), types: (tf.float32, tf.float32, tf.float32)>
    dataset = dataset.map(rearrange)
    print("dataset after rearrange", dataset)  # <MapDataset shapes: ({output: (30, 437, 256), input: (30, 437, 256)}, (30, 437, 256)), types: ({output: tf.float32, input: tf.float32}, tf.float32)>
    dataset = dataset.repeat()
    print("dataset repeat", dataset)  # <RepeatDataset shapes: ({output: (30, 437, 256), input: (30, 437, 256)}, (30, 437, 256)), types: ({output: tf.float32, input: tf.float32}, tf.float32)>
    iterator = dataset.make_initializable_iterator()
    print("iterator init", iterator)  # <tensorflow.python.data.ops.iterator_ops.Iterator object at 0x7f9ed1700128>
    # iterator = dataset.make_one_shot_iterator()
    # print("iterator oneshot", iterator)
    iterator = iterator.get_next()
    print("iterator", iterator)  # ({'output': <tf.Tensor 'IteratorGetNext:1' shape=(30, 437, 256) dtype=float32>, 'input': <tf.Tensor 'IteratorGetNext:0' shape=(30, 437, 256) dtype=float32>}, <tf.Tensor 'IteratorGetNext:2' shape=(30, 437, 256) dtype=float32>)
    return iterator
def main():
    filename_inp_enc = './train_input_enc.tfrecords'
    filename_oup_dec = './train_output_dec.tfrecords'
    filename_trg_dec = './train_target_dec.tfrecords'
    check_point_path = os.path.join(os.getcwd(), DEFINES.check_point_path)
    os.makedirs(check_point_path, exist_ok=True)
    classifier = tf.estimator.Estimator(
        model_fn=ml.Model,
        model_dir=DEFINES.check_point_path,
        config=estimator_config,
        params={
            ...
        })
    classifier.train(input_fn=lambda: data.train_input_fn(filename_inp_enc, filename_oup_dec, filename_trg_dec, DEFINES.batch_size), steps=DEFINES.train_steps)
Error messages are:
FailedPreconditionError (see above for traceback): GetNext() failed because the iterator has not been initialized. Ensure that you have run the initializer operation for this iterator before getting the next element.
[[{{node IteratorGetNext}} = IteratorGetNext[output_shapes=[[30,437,256], [30,437,256], [30,437,256]], output_types=[DT_FLOAT, DT_FLOAT, DT_FLOAT], _device="/job:localhost/replica:0/task:0/device:CPU:0"](IteratorV2)]]
When I put
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
the error messages are as follows:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Feature: train_output_dec/data_raw (data type: string) is required but could not be found.
[[{{node ParseSingleExample/ParseSingleExample}} = ParseSingleExample[Tdense=[DT_STRING, DT_FLOAT, DT_FLOAT, DT_STRING, DT_FLOAT, DT_FLOAT, DT_STRING, DT_FLOAT, DT_FLOAT], dense_keys=["train_input_enc/data_raw", "train_input_enc/fft", "train_input_enc/time", "train_output_dec/data_raw", "train_output_dec/fft", "train_output_dec/time", "train_target_dec/data_raw", "train_target_dec/fft", "train_target_dec/time"], dense_shapes=[[], [], [], [], [], [], [], [], []], num_sparse=0, sparse_keys=[], sparse_types=[], _device="/device:CPU:0"](arg0, ParseSingleExample/Const, ParseSingleExample/Const_1, ParseSingleExample/Const_1, ParseSingleExample/Const, ParseSingleExample/Const_1, ParseSingleExample/Const_1, ParseSingleExample/Const, ParseSingleExample/Const_1, ParseSingleExample/Const_1)]]
[[{{node IteratorGetNext}} = IteratorGetNext[output_shapes=[[30,437,256], [30,437,256], [30,437,256]], output_types=[DT_FLOAT, DT_FLOAT, DT_FLOAT], _device="/job:localhost/replica:0/task:0/device:CPU:0"](OneShotIterator)]]
[[{{node IteratorGetNext/_4351}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device_incarnation=1, tensor_name="edge_192_IteratorGetNext", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
How can I solve this problem?

You must feed value for placeholder *_sample_weights while training UNET from VGG16

I am trying to create a UNET using VGG16 for the first layers.
def BuildUNet2():
    keras.backend.set_learning_phase(1)
    inputs = keras.layers.Input(shape=(PATCH_SIZE, PATCH_SIZE, 3), name="inputs")
    vggModel = keras.applications.VGG16(include_top=False, input_tensor=inputs)
    layers = dict([(layer.name, layer) for layer in vggModel.layers])
    print("Layers", len(layers), layers)
    block1_conv2 = layers["block1_conv2"].output
    block2_conv2 = layers["block2_conv2"].output
    block3_conv3 = layers["block3_conv3"].output
    block4_conv3 = layers["block4_conv3"].output
    vggTop = layers["block5_conv3"].output
    up6 = keras.layers.concatenate([keras.layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding="same")(vggTop), block4_conv3], axis=3)
    conv61 = keras.layers.Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up6)
    conv62 = keras.layers.Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv61)
    up7 = keras.layers.concatenate([keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding="same")(conv62), block3_conv3], axis=3)
    conv71 = keras.layers.Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up7)
    conv72 = keras.layers.Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv71)
    up8 = keras.layers.concatenate([keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same")(conv72), block2_conv2], axis=3)
    conv81 = keras.layers.Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up8)
    conv82 = keras.layers.Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv81)
    up9 = keras.layers.concatenate([keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same")(conv82), block1_conv2], axis=3)
    conv91 = keras.layers.Conv2D(32, 3, activation="relu", padding="same", kernel_initializer="he_normal")(up9)
    conv92 = keras.layers.Conv2D(32, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv91)
    conv93 = keras.layers.Conv2D(1, (1, 1), activation="sigmoid")(conv92)
    model = keras.models.Model(input=[inputs], output=[conv93])
    for layer in model.layers[:19]:
        layer.trainable = False
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-5), loss=metric.dice_coef_loss,
                  metrics=[metric.dice_coef, "accuracy"])
    model.summary()
    return model
I am training with:
with h5py.File(parms.training, "r") as trainingsFile:
    wrk = trainingsFile["work"].value
np.random.seed(42)
np.random.shuffle(wrk)
limit = int(wrk.shape[0] * 0.8)
trainData = wrk[:limit]
valData = wrk[limit:]
trainGen = DataGenerator(trainData, parms.batchSize)
valGen = DataGenerator(valData, parms.batchSize)
bestCheckpoint = keras.callbacks.ModelCheckpoint("best.h5",
                                                 monitor="val_loss",
                                                 save_best_only=True,
                                                 save_weights_only=False)
regCheckpoint = keras.callbacks.ModelCheckpoint("checkpoint-{epoch:04d}.h5", period=10)
csvLog = keras.callbacks.CSVLogger("log.csv", append=True)
runName = datetime.datetime.now().isoformat("#")[:19].replace(":", "-")
tensorBoard = keras.callbacks.TensorBoard(log_dir="./logs/%s/" % runName)
lrPlateau = keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=10, cooldown=5)
model.fit_generator(trainGen,
                    epochs=parms.epochs,
                    steps_per_epoch=trainGen.__len__(),
                    validation_data=valGen,
                    validation_steps=valGen.__len__(),
                    callbacks=[bestCheckpoint, regCheckpoint, csvLog, tensorBoard, lrPlateau],
                    use_multiprocessing=False,
                    )
The DataGenerator is defined as:
class DataGenerator(keras.utils.Sequence):
    def __init__(self, data, batchSize):
        self.data = data
        self.batchSize = batchSize

    def __len__(self):
        return int((self.data.shape[0] + self.batchSize - 1) / (self.batchSize))

    def __getitem__(self, item):
        X = np.zeros((self.batchSize, self.data.shape[1], self.data.shape[2], 3), dtype=np.float32)
        Y = np.zeros((self.batchSize, self.data.shape[1], self.data.shape[2]), dtype=np.float32)
        j = 0
        wrk = np.zeros((self.data.shape[1], self.data.shape[2], self.data.shape[3]), dtype=np.float32)
        for i in range(item * self.batchSize, min((item + 1) * self.batchSize, self.data.shape[0])):
            wrk = self.data[i, :, :, :]
            if random.random() < 0.5:
                wrk = wrk[:, ::-1, :]
            if random.random() < 0.5:
                wrk = wrk[::-1, :, :]
            direction = int(random.random() * 4) * 90
            if direction:
                wrk = imutils.rotate(wrk, direction)
            X[j, :, :, :] = wrk[:, :, 0:3]
            Y[j, :, :] = wrk[:, :, 3]
            j += 1
        X = X.resize((j, X.shape[1], X.shape[2], X.shape[3]))
        Y = Y.resize((j, Y.shape[1], Y.shape[2]))
        return X, Y
Trying to train the model results in
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_9_sample_weights' with dtype float and shape [?]
Even explicitly returning a sample_weight (an additional np.ones((j,), dtype=np.float32)) from the DataGenerator does not solve the problem.
What's wrong?
How do I correct it?
The problem was in DataGenerator.__getitem__():
NumPy's ndarray.resize() does not return a new array; it resizes the original array in place and returns None. Therefore __getitem__ returned None, None.
The Keras error message is misleading.
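A minimal illustration of the bug, with one possible fix sketched in the comments (the slicing fix is a suggestion, not part of the original answer):
import numpy as np

X = np.zeros((8, 4, 4, 3), dtype=np.float32)
result = X.resize((5, 4, 4, 3))   # resizes X in place
print(result)                     # None  -> exactly what __getitem__ ended up returning
print(X.shape)                    # (5, 4, 4, 3)

# Inside __getitem__, returning trimmed slices avoids the problem, e.g.:
#     return X[:j], Y[:j]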

Why is the reduce_mean applied to the output of sparse_softmax_cross_entropy_with_logits?

There are several tutorials that apply reduce_mean to the output of sparse_softmax_cross_entropy_with_logits. For example:
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
or
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=tf.cast(y_, dtype=tf.int32), logits=y_conv))
Why is the reduce_mean applied to the output of sparse_softmax_cross_entropy_with_logits? Is it because we are using mini-batches, and so we want to calculate (using reduce_mean) the average loss over all samples of the mini-batch?
The reason is to get the average loss over the batch.
Generally you will train a neural network with input batches of size > 1; each element in the batch produces a loss value, so the easiest way to merge these into one value is to average them.
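A tiny self-contained illustration of the shapes involved:
logits = tf.constant([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])   # batch of 2 examples, 3 classes
labels = tf.constant([0, 2])
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(per_example_loss.shape)   # (2,)  -> one loss value per example in the batch
batch_loss = tf.reduce_mean(per_example_loss)
print(batch_loss.shape)         # ()    -> a single scalar, the average over the batch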
I found something interesting.
First, let's define sparse_vector as
sparse_vector = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=tf.cast(y_, dtype=tf.int32), logits=y_conv)
sparse_vector is a vector with one loss value per example, so we still have to reduce it to a single scalar; that is why we use reduce_mean.
import numpy as np
import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
print(mnist.test.labels.shape)
print(mnist.train.labels.shape)

with tf.name_scope('inputs'):
    X_ = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.int64, [None])

X = tf.reshape(X_, [-1, 28, 28, 1])
h_conv1 = tf.layers.conv2d(X, filters=32, kernel_size=5, strides=1,
                           padding='same', activation=tf.nn.relu, name='conv1')
h_pool1 = tf.layers.max_pooling2d(h_conv1, pool_size=2, strides=2,
                                  padding='same', name='pool1')
h_conv2 = tf.layers.conv2d(h_pool1, filters=64, kernel_size=5, strides=1,
                           padding='same', activation=tf.nn.relu, name='conv2')
h_pool2 = tf.layers.max_pooling2d(h_conv2, pool_size=2, strides=2,
                                  padding='same', name='pool2')
# flatten
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.layers.dense(h_pool2_flat, 1024, name='fc1', activation=tf.nn.relu)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, 0.5)
h_fc2 = tf.layers.dense(h_fc1_drop, units=10, name='fc2')
# y_conv = tf.nn.softmax(h_fc2)
y_conv = h_fc2
# print('Finished building network.')

# cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
sparse_vector = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=tf.cast(y_, dtype=tf.int32), logits=y_conv)
cross_entropy = tf.reduce_mean(sparse_vector)

sess.run(tf.global_variables_initializer())
# print(sparse_vector)
# print(cross_entropy)
# Tensor("SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits:0", shape=(?,), dtype=float32)
# Tensor("Mean:0", shape=(), dtype=float32)

batch = mnist.train.next_batch(10)
sparse_vector, cross_entropy = sess.run(
    [sparse_vector, cross_entropy],
    feed_dict={X_: batch[0], y_: batch[1]})
print(sparse_vector)
print(cross_entropy)
the output is
[2.2213464 2.2676413 2.3555744 2.3196406 2.0794516 2.394274 2.266591
2.3139718 2.345526 2.3952296]
2.2959247