Error in defining layers as subclass in keras tensorflow - tensorflow

How do I define a list of layers in a subclassed Keras model?
There must be something like self.layers = ... in __init__, but what should it be? In PyTorch, for example, there is self.layers = nn.ModuleList(layers).
class GTN(keras.Model):  # layers.Layer keeps track of everything under the hood!
    def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_layers, norm):
        super(GTN, self).__init__()
        self.num_edge = num_edge
        self.num_channels = num_channels
        self.w_in = w_in
        self.w_out = w_out
        self.num_class = num_class
        self.num_layers = num_layers
        self.is_norm = norm
        layers = []
        for i in tf.range(num_layers):
            if i == 0:
                layers.append(GTLayer(num_edge, num_channels, first=True))
            else:
                layers.append(GTLayer(num_edge, num_channels, first=False))

    def call(self, A, X, target_x, target):
        A = tf.expand_dims(A, 0)
        Ws = []
        for i in range(self.num_layers):
            if i == 0:
                H, W = self.layers[i](A)  # self.layers = nn.ModuleList(layers)
            else:
                H = self.normalization(H)
                H, W = self.layers[i](A, H)
            Ws.append(W)
File "/Users/anshumansinha/Desktop/GTN/model_tf_2.py", line 108, in
call
H, W = self.layersi #self.layers = nn.ModuleList(layers) AttributeError: Exception encountered when calling layer "gtn" (type
GTN).
'GTN' object has no attribute 'layers'
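In tf.keras, sublayers assigned to instance attributes are tracked automatically, including layers kept in a plain Python list, so the usual counterpart of PyTorch's nn.ModuleList is simply storing the list on self under a name other than layers (Model.layers is a read-only property). Below is a minimal sketch of that pattern; the attribute name gt_layers and the stub GTLayer body are illustrative, not the original code:

import tensorflow as tf
from tensorflow import keras

class GTLayer(keras.layers.Layer):
    """Minimal stand-in for the real GTLayer, only to make the sketch runnable."""
    def __init__(self, num_edge, num_channels, first=True):
        super().__init__()
        self.first = first

    def call(self, A, H=None):
        W = tf.random.uniform(shape=[2, 2])
        H = tf.matmul(A, W) if H is None else tf.matmul(H, W)
        return H, W

class GTN(keras.Model):
    def __init__(self, num_edge, num_channels, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # Keras tracks layers stored in a list attribute; avoid the name
        # `self.layers`, which is a read-only property of keras.Model.
        self.gt_layers = [
            GTLayer(num_edge, num_channels, first=(i == 0))
            for i in range(num_layers)
        ]

    def call(self, A):
        Ws, H = [], None
        for i, layer in enumerate(self.gt_layers):
            H, W = layer(A) if i == 0 else layer(A, H)
            Ws.append(W)
        return H, Ws

model = GTN(num_edge=6, num_channels=3, num_layers=2)
H, Ws = model(tf.random.uniform(shape=[2, 2]))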


Error in defining custom layers using subclasses in keras

I am getting an error when defining the layers of my model. The input to self.layers[i] does not work when I run X = self.layers[i](A, A) inside the code. However, a layer built from GTLayer works fine when I call it separately: A = tf.random.uniform(shape=[2,2]); d = GTLayer(2,2,1); d.call(A,A)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

class GTN(keras.Model):  # layers.Layer keeps track of everything under the hood!
    def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_layers, norm):
        super(GTN, self).__init__()
        self.num_layers = 3
        self.num_edge = 6
        self.num_channels = 3
        self.w_in = 2  # tf.random.uniform(shape=[2,2])
        self.w_out = 2  # tf.random.uniform(shape=[2,2])
        self.num_class = 2
        layers = []
        for i in tf.range(num_layers):
            print(4)
            if i == 0:
                layers.append(GTLayer(num_edge, num_channels, first=True))
            else:
                print(i)
                layers.append(GTLayer(num_edge, num_channels, first=False))
        # print((self.w_out*self.num_channels, ))
        self.linear1 = tf.keras.layers.Dense(self.w_out, input_shape=(self.w_out*self.num_channels, ), activation=None)
        self.linear2 = tf.keras.layers.Dense(self.num_class, input_shape=(self.w_out, ), activation=None)

    def call(self, A, X, target_x, target):
        # A = tf.expand_dims(A, 0)
        Ws = []
        print('hello')
        for i in range(self.num_layers):
            print('i:', i)
            if i == 0:
                print('layers:', layers)
                print('layers[i](A):', self.layers[i](A))
                H, W = self.layers[i](A)  # self.layers = nn.ModuleList(layers)
            else:
                # H = self.normalization(H)
                print(H)
                print(W)
                print('A', A)
                X = self.layers[i](A, A)
                print('H_dash', self.layers[i](A))
                H, W = self.layers[i](A, H)
            Ws.append(W)
        for i in range(self.num_channels):
            if i == 0:
                X_ = tf.nn.relu(self.gcn_conv(X, H[i])).numpy()
            else:
                X_tmp = tf.nn.relu(self.gcn_conv(X, H[i])).numpy()
                X_ = tf.concat((X_, X_tmp), dim=1)
        X_ = self.linear1(X_)
        X_ = tf.nn.relu(X_).numpy()
        y = self.linear2(X_[target_x])
        loss = self.loss(y, target)
        return loss, y, Ws

class GTLayer(keras.layers.Layer):
    def __init__(self, in_channels, out_channels, first=True):
        super(GTLayer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.first = first

    def call(self, A, H_=None):
        if self.first == True:
            a = tf.random.uniform(shape=[2, 2])
            b = tf.random.uniform(shape=[2, 2])
            print(a)
            print(b)
            # H = torch.bmm(np.array(a), np.array(b))
            H = tf.matmul(a, b)
        else:
            a = tf.random.uniform(shape=[2])
            H = tf.random.uniform(shape=[2])
        return H, W
Input used for check:
d = GTN(2,3,4,2,2,2,1)
A = tf.random.uniform(shape=[2,2])
X = tf.random.uniform(shape=[2,2])
t_x = 5
t = 4
d.call(A,X,t_x,t)
Error:
---> 47 X = self.layers[i](A, A)
TypeError: call() takes 2 positional arguments but 3 were given
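For what it is worth, this TypeError usually means the layer's call declared only one tensor argument while two were passed positionally. A minimal sketch of a call signature that accepts an optional second tensor, so both layer(A) and layer(A, A) are valid calls; the body is illustrative, not the original GTLayer logic:

import tensorflow as tf
from tensorflow import keras

class GTLayer(keras.layers.Layer):
    def __init__(self, in_channels, out_channels, first=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.first = first

    def call(self, A, H_=None):
        # The second tensor is optional, so the layer can be called
        # either as layer(A) or as layer(A, H).
        b = tf.random.uniform(shape=A.shape) if H_ is None else H_
        W = tf.random.uniform(shape=A.shape)
        H = tf.matmul(A, b) + W
        return H, W

layer = GTLayer(2, 2, first=False)
A = tf.random.uniform(shape=[2, 2])
H, W = layer(A)      # one positional tensor
H, W = layer(A, A)   # two positional tensors also match the signature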

TensorFlow training with large dataset takes too long

Yesterday I created a pretrained VGG19 with a custom head and tried to train it on 60,000 images. After more than 12 hours, the training of the first epoch still hadn't completed.
The batch size was set to 64 and the number of steps per epoch was set to training_set_size/batch_size.
Below is the code of DataLoader:
IMAGE_CHANNEL = 3

def crop(image, margin):
    return image[margin:-margin, margin:-margin]

def random_rotation(image, angle):
    M = cv2.getRotationMatrix2D((0, 0), angle, 1)
    rows, cols, _ = image.shape
    new_img = cv2.warpAffine(image, M, (cols, rows))
    return new_img

def get_generator(in_gen, should_augment=True):
    weights = None
    if should_augment:
        image_gen = tf.keras.preprocessing.image.ImageDataGenerator(fill_mode='reflect',
                                                                     data_format='channels_last',
                                                                     brightness_range=[0.5, 1.5])
    else:
        image_gen = tf.keras.preprocessing.image.ImageDataGenerator(fill_mode='reflect',
                                                                     data_format='channels_last',
                                                                     brightness_range=[1, 1])
    for items in in_gen:
        in_x, in_y = items
        g_x = image_gen.flow(255 * in_x, in_y, batch_size=in_x.shape[0])
        x, y = next(g_x)
        yield x / 255.0, y
class DataLoader:
    def __init__(self, source_filename, dataset_path, image_size, batch_size, training_set_size=0.8, sample_size=None):
        path_dataset = Path(dataset_path)
        path_image_folders = path_dataset / 'images'
        self.data = pd.read_pickle(source_filename)
        if sample_size is not None:
            self.data = self.data[:sample_size]
        self.image_size = image_size
        self.batch_size = batch_size
        self.training_set_size = training_set_size
        self.steps_per_epoch = int(self.data.shape[0] * training_set_size // batch_size)
        if self.steps_per_epoch == 0:
            self.steps_per_epoch = 1
        self.validation_steps = int(self.data.shape[0] * (1 - training_set_size) // batch_size)
        if self.validation_steps == 0:
            self.validation_steps = 1

    def draw_idx(self, i):
        img_path = self.data.iloc[i].image
        img = tf.keras.preprocessing.image.img_to_array(tf.keras.preprocessing.image.load_img(str(img_path)))
        # print(img.shape)
        height, width, _ = img.shape
        fig = plt.figure(figsize=(15, 15), facecolor='w')
        # original image
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(img / 255.0)
        openness = self.data.iloc[i].Openness
        conscientiousness = self.data.iloc[i].Conscientiousness
        extraversion = self.data.iloc[i].Extraversion
        agreeableness = self.data.iloc[i].Agreeableness
        neuroticism = self.data.iloc[i].Neuroticism
        ax.title.set_text(
            f'O: {openness}, C: {conscientiousness}, E: {extraversion}, A: {agreeableness}, N: {neuroticism}')
        plt.axis('off')
        plt.tight_layout()
        plt.show()

    def get_image(self, index, data, should_augment):
        # Read image and appropriate landmarks
        image = cv2.imread(data['image'].values[index])
        h, w, _ = image.shape
        o, c, e, a, n = data[['Openness', 'Conscientiousness', 'Extraversion', 'Agreeableness', 'Neuroticism']].values[index]
        should_flip = random.randint(0, 1)
        should_rotate = random.randint(0, 1)
        should_crop = random.randint(0, 1)
        if should_augment:
            if should_flip == 1:
                # print("Image {} flipped".format(data['path'].values[index]))
                image = cv2.flip(image, 1)
            if should_rotate == 1:
                angle = random.randint(-5, 5)
                image = random_rotation(image, angle)
            if should_crop == 1:
                margin = random.randint(1, 10)
                image = crop(image, margin)
        image = cv2.resize(image, (self.image_size, self.image_size))
        return [image, o, c, e, a, n]

    def generator(self, data, should_augment=True):
        while True:
            # Randomize the indices to make an array
            indices_arr = np.random.permutation(data.count()[0])
            for batch in range(0, len(indices_arr), self.batch_size):
                # slice out the current batch according to batch-size
                current_batch = indices_arr[batch:(batch + self.batch_size)]
                # initializing the arrays, x_train and y_train
                x_train = np.empty(
                    [0, self.image_size, self.image_size, IMAGE_CHANNEL], dtype=np.float32)
                y_train = np.empty([0, 5], dtype=np.int32)
                for i in current_batch:
                    # get an image and its corresponding OCEAN scores
                    [image, o, c, e, a, n] = self.get_image(i, data, should_augment)
                    # Appending them to existing batch
                    x_train = np.append(x_train, [image], axis=0)
                    y_train = np.append(y_train, [[o, c, e, a, n]], axis=0)
                # replace nan values with zeros
                y_train = np.nan_to_num(y_train)
                yield (x_train, y_train)

    def get_training_and_test_generators(self, should_augment_training=True, should_augment_test=True):
        msk = np.random.rand(len(self.data)) < self.training_set_size
        train = self.data[msk]
        test = self.data[~msk]
        train_gen = self.generator(train, should_augment_training)
        test_gen = self.generator(test, should_augment_test)
        return get_generator(train_gen, should_augment_training), get_generator(test_gen, should_augment_test)

    def show_batch_images_sample(self, images, landmarks, n_rows=3, n_cols=3):
        assert n_rows * n_cols <= self.batch_size, "Number of expected images to display is larger than batch!"
        fig = plt.figure(figsize=(15, 15))
        xs, ys = [], []
        count = 1
        for img, y in zip(images, landmarks):
            ax = fig.add_subplot(n_rows, n_cols, count)
            ax.imshow(img)
            h, w, _ = img.shape
            o, c, e, a, n = y
            ax.title.set_text(f'{o}, {c}, {e}, {a}, {n}')
            ax.axis('off')
            if count == n_rows * n_cols:
                break
            count += 1
class CallbackTensorboardImageOutput(Callback):
    def __init__(self, model, generator, log_dir, feed_inputs_display=9):
        # assert ((feed_inputs_display & (feed_inputs_display - 1)) == 0) and feed_inputs_display != 0
        self.generator = generator
        self.model = model
        self.log_dir = log_dir
        self.writer = tf.summary.create_file_writer(self.log_dir)
        self.feed_inputs_display = feed_inputs_display
        self.seen = 0

    def plot_to_image(figure):
        """Converts the matplotlib plot specified by 'figure' to a PNG image and
        returns it. The supplied figure is closed and inaccessible after this call."""
        # Save the plot to a PNG in memory.
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        # Closing the figure prevents it from being displayed directly inside
        # the notebook.
        plt.close(figure)
        buf.seek(0)
        # Convert PNG buffer to TF image
        image = tf.image.decode_png(buf.getvalue(), channels=4)
        # Add the batch dimension
        image = tf.expand_dims(image, 0)
        return image

    @staticmethod
    def get_loss(gt, predictions):
        return tf.losses.mse(gt, predictions)

    def on_epoch_end(self, epoch, logs={}):
        self.seen += 1
        if self.seen % 1 == 0:
            items = next(self.generator)
            images_to_display = self.feed_inputs_display
            images_per_cell_count = int(math.sqrt(images_to_display))
            # in case of regular model training using a generator, an array is passed
            if not isinstance(items, dict):
                frames_arr, ocean_scores = items
                # Take just the first samples from the batch
                batch_size = frames_arr.shape[0]
                if images_to_display > batch_size:
                    images_to_display = batch_size
                frames_arr = frames_arr[0:images_to_display]
                ocean_scores = ocean_scores[0:images_to_display]
                y_pred = self.model.predict(frames_arr)
            # in case of adversarial training, a dictionary is passed
            else:
                batch_size = items['feature'].shape[0]
                if images_to_display > batch_size:
                    images_to_display = batch_size
                # items['feature'] = items['feature'][0:images_to_display]
                # landmarks = items['label'][0:images_to_display]
                frames_arr = items['feature']
                landmarks = items['label']
                y_pred = self.model.predict(items)
            figure = plt.figure(figsize=(15, 15))
            for i in range(images_to_display):
                image_current = frames_arr[i]
                y_prediction_current = y_pred[i]
                y_gt_current = ocean_scores[i]
                lbl_prediction = 'plot/img/{}'.format(i)
                ax = plt.subplot(images_per_cell_count, images_per_cell_count, i + 1, title=lbl_prediction)
                ax.imshow(image_current)
                ax.axis('off')
            with self.writer.as_default():
                tf.summary.image("Training Data", CallbackTensorboardImageOutput.plot_to_image(figure), step=self.seen)
Below are the definition of the network architecture and the call to fit_generator:
data_loader = dataloader.DataLoader('dataset.pkl', '/home/niko/data/PsychoFlickr', 224, 64)
train_gen, test_gen = data_loader.get_training_and_test_generators()
pre_trained_model = tf.keras.applications.VGG19(input_shape=(data_loader.image_size, data_loader.image_size, dataloader.IMAGE_CHANNEL), weights='imagenet', include_top=False)
x = pre_trained_model.output
x = tf.keras.layers.Flatten()(x)
# Add a fully connected layer with 256 hidden units and ReLU activation
x = tf.keras.layers.Dense(256)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Dropout(rate=0.5)(x)
x = tf.keras.layers.Dense(256)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Dropout(rate=0.5)(x)
x = tf.keras.layers.Dense(5, name='regresion_output')(x)
x = tf.keras.layers.Activation('linear')(x)
model = tf.keras.Model(pre_trained_model.input, x)
print(model.summary())
log_dir = "logs/{}".format(model_name)
model_filename = "saved-models/{}.h5".format(model_name)
cb_tensorboard = TensorBoard(log_dir=log_dir)
callback_save_images = dataloader.CallbackTensorboardImageOutput(model, test_gen, log_dir)
checkpoint = ModelCheckpoint(model_filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
lr = 1e-3
opt = tf.optimizers.Adam(lr=lr)
model.compile(loss=loss_mse, optimizer=opt, metrics=[loss_mse])
history = model.fit_generator(
    train_gen,
    validation_data=test_gen,
    steps_per_epoch=data_loader.steps_per_epoch,
    epochs=20,
    validation_steps=data_loader.validation_steps,
    verbose=2,
    use_multiprocessing=True,
    callbacks=[checkpoint, callback_save_images, cb_tensorboard]
)
When I tried to run the same procedure on a small sample of data (200 records), everything seemed to work fine. On the dataset of 60,000 records, however, the training of the first epoch had not completed after more than 12 hours.
The training is performed on an NVIDIA RTX 2080 Ti.
I would be grateful if anyone could suggest what has to be modified, or configured in general, in order to train the network in a reasonable time.
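Since the generator builds each batch by appending single images with np.append and then pushes it through ImageDataGenerator.flow, a quick sanity check is to time how long producing one batch takes compared with one training step on that batch; if batch production dominates, the input pipeline rather than the GPU is the limit. A rough, self-contained timing sketch (the names follow the code above, but the timing approach itself is an addition, not part of the original post):

import time

# Assumes data_loader, train_gen and the compiled model from the code above.
t0 = time.time()
x_batch, y_batch = next(train_gen)          # one batch from the Python generator
t_data = time.time() - t0

t0 = time.time()
model.train_on_batch(x_batch, y_batch)      # one optimisation step on the GPU
t_step = time.time() - t0

print(f"batch generation: {t_data:.2f}s, train step: {t_step:.2f}s")
# With steps_per_epoch = int(60000 * 0.8 // 64) = 750, one epoch takes roughly
# 750 * (t_data + t_step) seconds when generation and training do not overlap.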

I get an error when I load a tensorflow2.0 model

I am training a simple model to perform a linear regression, and then I save the model:
class NN(tf.keras.Model):
    def __init__(self):
        super(NN, self).__init__()
        L = 20
        self.W1 = tf.Variable(tf.random.truncated_normal([1, L], stddev=math.sqrt(3)))
        self.B1 = tf.Variable(tf.random.truncated_normal([1, L], stddev=1.0))
        self.W2 = tf.Variable(tf.random.truncated_normal([L, 1], stddev=math.sqrt(3/L)))
        self.B2 = tf.Variable(tf.zeros([1]))

    def call(self, inputs):
        Z1 = tf.matmul(inputs, self.W1) + self.B1
        Y1 = tf.nn.tanh(Z1)
        Y = tf.matmul(Y1, self.W2) + self.B2
        return Y

# The loss function to be optimized
def loss(model, X, Y_):
    error = model(X) - Y_
    return tf.reduce_mean(tf.square(error))

model = NN()
optimizer = tf.optimizers.Adam(learning_rate=0.001)
bsize = 20

# You can call this function in a loop to train the model, bsize samples at a time
def training_step(i):
    # read data
    x_batch, y_batch = func.next_batch(bsize)
    x_batch = np.reshape(x_batch, (bsize, 1))
    y_batch = np.reshape(y_batch, (bsize, 1))
    # compute training values
    loss_fn = lambda: loss(model, x_batch, y_batch)
    optimizer.minimize(loss_fn, [model.W1, model.B1, model.W2, model.B2])
    if i % 5000 == 0:
        l = loss(model, x_batch, y_batch)
        print(str(i) + ": epoch: " + str(func._epochs_completed) + ": loss: " + str(l.numpy()))

for i in range(50001):
    training_step(i)

# save the model
tf.saved_model.save(model, "my_file")
and then I try to load the model with the following lines, following the TensorFlow documentation:
model = tf.saved_model.load("my_file")
f = model.signatures["serving_default"]
y = f(x)
However I get the following error message:
f = model.signatures["serving_default"]
File "my_file/signature_serialization.py", line 195, in __getitem__
return self._signatures[key]
KeyError: 'serving_default'
What is wrong? Why is serving_default not defined?
I solved the problem by adding a third argument to the tf.saved_model.save function:
tf.saved_model.save(model, "myfile", signatures=model.call.get_concrete_function(tf.TensorSpec(shape=[None,1], dtype=tf.float32, name="inp")))
and by adding the @tf.function decorator above the call method:
class NN(tf.keras.Model):
    def __init__(self):
        super(NN, self).__init__()
        L = 20
        self.W1 = tf.Variable(tf.random.truncated_normal([1, L], stddev=math.sqrt(3)))
        self.B1 = tf.Variable(tf.random.truncated_normal([1, L], stddev=1.0))
        self.W2 = tf.Variable(tf.random.truncated_normal([L, 1], stddev=math.sqrt(3/L)))
        self.B2 = tf.Variable(tf.zeros([1]))

    @tf.function
    def call(self, X):
        Z1 = tf.matmul(X, self.W1) + self.B1
        Y1 = tf.nn.tanh(Z1)
        Y = tf.matmul(Y1, self.W2) + self.B2
        return Y
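With the model saved this way, the serving_default signature can be looked up and called using the input name declared in the TensorSpec (inp in the snippet above). A minimal sketch of the loading side, assuming the model was saved as shown:

import tensorflow as tf

loaded = tf.saved_model.load("myfile")
f = loaded.signatures["serving_default"]

# Signatures take and return dictionaries keyed by tensor names;
# the input key matches name="inp" used when saving.
x = tf.constant([[0.5]], dtype=tf.float32)
out = f(inp=x)
print(out)  # dict mapping the output name to a [1, 1] tensor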

Pytorch LSTM not using GPU

I'm trying to train a PyTorch LSTM model connected to a couple of MLP layers. The model is coded as follows:
class RNNBlock(nn.Module):
    def __init__(self, in_dim, hidden_dim, num_layer=1, dropout=0):
        super(RNNBlock, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layer = num_layer
        self.lstm = nn.LSTM(in_dim, hidden_dim, num_layer, dropout)

    def forward(self, onehot, length):
        batch_size = onehot.shape[0]
        h_in = nn.Parameter(torch.randn(self.num_layer, batch_size, self.hidden_dim))
        c_in = nn.Parameter(torch.randn(self.num_layer, batch_size, self.hidden_dim))
        packed = nn.utils.rnn.pack_padded_sequence(onehot, length, batch_first=True)
        output, (h_out, c_out) = self.lstm(packed, (h_in, c_in))
        unpacked, unpacked_length = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        vectors = list()
        for i, vector in enumerate(unpacked):
            vectors.append(unpacked[i, unpacked_length[i]-1, :].view(1, -1))
        out = torch.cat(vectors, 0)
        return out

class Predictor(nn.Module):
    def __init__(self, in_dim, out_dim, act=None):
        super(Predictor, self).__init__()
        self.linear = nn.Linear(in_dim, out_dim)
        nn.init.xavier_normal_(self.linear.weight)
        self.activation = act

    def forward(self, x):
        out = self.linear(x)
        if self.activation != None:
            out = self.activation(out)
        return out

class RNNNet(nn.Module):
    def __init__(self, args):
        super(RNNNet, self).__init__()
        self.rnnBlock = RNNBlock(args.in_dim, args.hidden_dim, args.num_layer, args.dropout)
        self.pred1 = Predictor(args.hidden_dim, args.pred_dim1, act=nn.ReLU())
        self.pred2 = Predictor(args.pred_dim1, args.pred_dim2, act=nn.ReLU())
        self.pred3 = Predictor(args.pred_dim2, args.out_dim)

    def forward(self, onehot, length):
        out = self.rnnBlock(onehot, length)
        out = self.pred1(out)
        out = self.pred2(out)
        out = self.pred3(out)
        return out
and these are my train and experiment functions:
def train(model, device, optimizer, criterion, data_train, bar, args):
    epoch_train_loss = 0
    epoch_train_mae = 0
    for i, batch in enumerate(data_train):
        list_onehot = torch.tensor(batch[0]).cuda().float()
        list_length = torch.tensor(batch[1]).cuda()
        list_logP = torch.tensor(batch[2]).cuda().float()
        # Sort onehot tensor with respect to the sequence length.
        list_length, list_index = torch.sort(list_length, descending=True)
        list_length.cuda()
        list_index.cuda()
        list_onehot = torch.Tensor([list_onehot.tolist()[i] for i in list_index]).cuda().float()
        model.train()
        optimizer.zero_grad()
        list_pred_logP = model(list_onehot, list_length).squeeze().cuda()
        list_pred_logP.require_grad = False
        train_loss = criterion(list_pred_logP, list_logP)
        train_mae = mean_absolute_error(list_pred_logP.tolist(), list_logP.tolist())
        epoch_train_loss += train_loss.item()
        epoch_train_mae += train_mae
        train_loss.backward()
        optimizer.step()
        bar.update(len(list_onehot))
    epoch_train_loss /= len(data_train)
    epoch_train_mae /= len(data_train)
    return model, epoch_train_loss, epoch_train_mae

def experiment(dict_partition, device, bar, args):
    time_start = time.time()
    model = RNNNet(args)
    model.cuda()
    if args.optim == 'Adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.l2_coef)
    elif args.optim == 'RMSprop':
        optimizer = optim.RMSprop(model.parameters(),
                                  lr=args.lr,
                                  weight_decay=args.l2_coef)
    elif args.optim == 'SGD':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.l2_coef)
    else:
        assert False, 'Undefined Optimizer Type'
    criterion = nn.MSELoss()
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    list_train_loss = list()
    list_val_loss = list()
    list_train_mae = list()
    list_val_mae = list()
    data_train = DataLoader(dict_partition['train'], batch_size=args.batch_size, shuffle=args.shuffle)
    data_val = DataLoader(dict_partition['val'], batch_size=args.batch_size, shuffle=args.shuffle)
    for epoch in range(args.epoch):
        scheduler.step()
        model, train_loss, train_mae = train(model, device, optimizer, criterion, data_train, bar, args)
        list_train_loss.append(train_loss)
        list_train_mae.append(train_mae)
        mode, val_loss, val_mae = validate(model, device, criterion, data_val, bar, args)
        list_val_loss.append(val_loss)
        list_val_mae.append(val_mae)
    data_test = DataLoader(dict_partition['test'], batch_size=args.batch_size, shuffle=args.shuffle)
    mae, std, logP_total, pred_logP_total = test(model, device, data_test, args)
    time_end = time.time()
    time_required = time_end - time_start
    args.list_train_loss = list_train_loss
    args.list_val_loss = list_val_loss
    args.list_train_mae = list_train_mae
    args.list_val_mae = list_val_mae
    args.logP_total = logP_total
    args.pred_logP_total = pred_logP_total
    args.mae = mae
    args.std = std
    args.time_required = time_required
    return args
The list_onehot and list_length tensors are loaded from the DataLoader and moved to the GPU. Then, to use a packed sequence as input, I sorted both list_onehot and list_length and moved them to the GPU. The model was moved to the GPU, and the h_in and c_in tensors and the packed-sequence object were supposed to be on the GPU as well. However, when I run this code it does not use the GPU, only the CPU. What should I do to train this model on the GPU?
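One detail worth checking (a hedged suggestion, not part of the original post): Tensor.cuda() is not in-place, so calls like list_length.cuda() whose result is discarded leave the tensor on the CPU, and the h_in/c_in tensors created fresh inside forward are never moved by model.cuda(), which only relocates parameters registered at construction time. A minimal sketch of creating the initial states on the same device as the input; it restates RNNBlock with illustrative simplifications (batch_first LSTM, index-based selection of the last step) and assumes a recent PyTorch where pack_padded_sequence wants lengths on the CPU:

import torch
import torch.nn as nn

class RNNBlock(nn.Module):
    def __init__(self, in_dim, hidden_dim, num_layer=1, dropout=0):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_layer = num_layer
        self.lstm = nn.LSTM(in_dim, hidden_dim, num_layer, dropout=dropout, batch_first=True)

    def forward(self, onehot, length):
        batch_size = onehot.shape[0]
        # Allocate the initial states directly on the input's device so they
        # follow the model to the GPU; .cuda()/.to() return new tensors and
        # have no effect when the result is discarded.
        h_in = torch.randn(self.num_layer, batch_size, self.hidden_dim, device=onehot.device)
        c_in = torch.randn(self.num_layer, batch_size, self.hidden_dim, device=onehot.device)
        packed = nn.utils.rnn.pack_padded_sequence(onehot, length.cpu(), batch_first=True)
        output, (h_out, c_out) = self.lstm(packed, (h_in, c_in))
        unpacked, unpacked_length = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        # Pick the last valid time step of each sequence.
        return unpacked[torch.arange(batch_size), unpacked_length - 1, :]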

Attempting to use uninitialized value - even if I did initialization

An initialization error occurs even after running the global variable initializer.
The error is:
FailedPreconditionError: Attempting to use uninitialized value biases
[[Node: biases/read = Identity[T=DT_FLOAT, _class=["loc:@Adagrad/update_biases/ApplyAdagrad"], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]
import functools

def lazy_property(function):
    attribute = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)
    return decorator

class Model:
    def __init__(self, data, target):
        self.data = data
        self.target = target
        self._logits = None
        self._prediction = None
        self._optimize = None
        self._error = None

    @lazy_property
    def logits(self):
        w = tf.Variable(tf.truncated_normal([784, 1]), name='weights')
        b = tf.Variable(tf.zeros([1]), name='biases')
        self._logits = tf.matmul(self.data, w) + b
        return self._logits

    @lazy_property
    def prediction(self):
        self._prediction = tf.nn.softmax(self.logits)
        return self._prediction

    @lazy_property
    def optimize(self):
        labels = tf.to_int64(self.target)
        logits = self.prediction
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy')
        loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
        self._optimize = tf.train.AdagradOptimizer(0.05).minimize(loss)
        return self._optimize

    @lazy_property
    def error(self):
        mistakes = tf.not_equal(tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
        return tf.reduce_mean(tf.cast(mistakes, tf.float32))

batch_size = 100
num_steps = 1000
tf.reset_default_graph()
data = MNIST(data_dir="data/MNIST/")
X = tf.placeholder(tf.float32, [batch_size, 784], name='Placeholder_Input')
Y = tf.placeholder(tf.int64, [batch_size], name='Placeholder_Output')
model = Model(X, Y)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for step in range(num_steps):
        model = Model(X, Y)
        for _ in range(100):
            x_batch, y_true_batch, _ = data.random_batch(batch_size=batch_size)
            y_true_batch = np.argmax(y_true_batch, axis=1)
            error, _ = session.run(model.optimize, feed_dict={X: x_batch, Y: y_true_batch})
        if (step % 100 == 0):
            print("Error rate # iter %d : %f" % (step, error))
You should run session.run(tf.global_variables_initializer()) once the model is fully defined. Note that you are defining a new model at each step, and the variables are only being instantiated when you call model.optimize. Here is my recommendation:
model = Model(X, Y)
optimize = model.optimize

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for step in range(num_steps):
        for _ in range(100):
            x_batch, y_true_batch, _ = data.random_batch(batch_size=batch_size)
            y_true_batch = np.argmax(y_true_batch, axis=1)
            error, _ = session.run(optimize, feed_dict={X: x_batch, Y: y_true_batch})
        if (step % 100 == 0):
            print("Error rate # iter %d : %f" % (step, error))