This question has been asked several times already, but I can't seem to adapt previous solutions to my code, so I would appreciate any advice on how to solve this. I have tried using pdb and setting a trace point right before the problem, which didn't give me much information.
I am adapting this tutorial to my problem:
https://www.oreilly.com/ideas/visualizing-convolutional-neural-networks
Data Shape:
x_train.shape: (1161, 68, 68, 1)
x_test.shape: (216, 68, 68, 1)
y_test.shape: (216,)
y_train.shape: (1161,)
Where the error occurs:
#Train the Model
steps = int(x_train.shape[0]/batchSize)
for i in range(numEpochs):
    print(i)
    accHist = []
    accHist2 = []
    #x_train, y_train = imf.shuffle(x_train, y_train)
    for j in range(steps):
        print(j)
        #Calculate our current step
        step = i * steps + j
        #Feed forward batch of train images into graph and log accuracy
        acc = sess.run([accuracy], feed_dict={X: x_train[(j*batchSize):((j+1)*batchSize),:,:,:], Y_: np.array(y_train[(j*batchSize):((j+1)*batchSize)]).reshape(1,30), keepRate1: 1, keepRate2: 1})
        print(accHist)
        accHist.append(acc)
        #Back propagate using adam optimizer to update weights and biases.
        sess.run(train_step, feed_dict={X: x_train[(j*batchSize):((j+1)*batchSize),:,:,:], Y_: np.array(y_train[(j*batchSize):((j+1)*batchSize)]).reshape(1,30), keepRate1: 0.2, keepRate2: 0.5})
        print("success")
    print('Epoch number {} Training Accuracy: {}'.format(i+1, np.mean(accHist)))
    #Feed forward all test images into graph and log accuracy
    for k in range(int(x_test.shape[0]/batchSize)):
        acc = sess.run(accuracy, feed_dict={X: x_test[(k*batchSize):((k+1)*batchSize),:,:,:], Y_: np.array(y_test[(k*batchSize):((k+1)*batchSize)]).reshape(1,30), keepRate1: 1, keepRate2: 1})
        accHist2.append(acc)
    print("Test Set Accuracy: {}".format(np.mean(accHist2)))
I am getting the following error message:
InvalidArgumentError: logits and labels must be same size: logits_size=[30,30] labels_size=[1,30]
[[Node: cross_entropy_7/SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](cross_entropy_7/Reshape, cross_entropy_7/Reshape_1)]]
Following the tutorial, I thought the logits were set here:
#FULLY CONNECTED 3 & SOFTMAX OUTPUT
with tf.name_scope('softmax') as scope:
    fc2w = tf.Variable(tf.truncated_normal([512, classes], dtype=tf.float32,
                                           stddev=1e-1), name='weights3_2')
    fc2b = tf.Variable(tf.constant(1.0, shape=[classes], dtype=tf.float32),
                       trainable=True, name='biases3_2')
    Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)
    Y = tf.nn.softmax(Ylogits)
print(Ylogits.shape) here gives me (?, 30); classes is set to 30, so this seems to make sense.
This seems to be the function that doesn't work, so I printed the shapes:
with tf.name_scope('cross_entropy'):
    print(Ylogits.shape)
    print(Y.shape)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
    loss = tf.reduce_mean(cross_entropy)
Which gave me:
(?, 30)
(?, 30)
When executing the back-propagation line above, though, this does not work. Can anyone help?
In response to a comment (this is basically the tutorial code from the link mentioned above):
Placeholders:
classes = 30
X = tf.placeholder(tf.float32, name="X-placeholder", shape=(None, 68, 68, 1))
Y_ = tf.placeholder(tf.float32, [None, classes], name="Y_-placeholder")
keepRate1 = tf.placeholder(tf.float32, name="keepRate1-placeholder")
keepRate2 = tf.placeholder(tf.float32, name="keepRate2-placeholder")
Model:
# CONVOLUTION 1 - 1
with tf.name_scope('conv1_1'):
    filter1_1 = tf.Variable(tf.truncated_normal([3, 3, 1, 32], dtype=tf.float32,
                                                stddev=1e-1), name='weights1_1')
    stride = [1,1,1,1]
    conv = tf.nn.conv2d(X, filter1_1, stride, padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32),
                         trainable=True, name='biases1_1')
    out = tf.nn.bias_add(conv, biases)
    conv1_1 = tf.nn.relu(out)
# CONVOLUTION 1 - 2
with tf.name_scope('conv1_2'):
    filter1_2 = tf.Variable(tf.truncated_normal([3, 3, 32, 32], dtype=tf.float32,
                                                stddev=1e-1), name='weights1_2')
    conv = tf.nn.conv2d(conv1_1, filter1_2, [1,1,1,1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32),
                         trainable=True, name='biases1_2')
    out = tf.nn.bias_add(conv, biases)
    conv1_2 = tf.nn.relu(out)
# POOL 1
with tf.name_scope('pool1'):
    pool1_1 = tf.nn.max_pool(conv1_2,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='pool1_1')
    pool1_1_drop = tf.nn.dropout(pool1_1, keepRate1)
# CONVOLUTION 2 - 1
with tf.name_scope('conv2_1'):
    filter2_1 = tf.Variable(tf.truncated_normal([3, 3, 32, 64], dtype=tf.float32,
                                                stddev=1e-1), name='weights2_1')
    conv = tf.nn.conv2d(pool1_1_drop, filter2_1, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                         trainable=True, name='biases2_1')
    out = tf.nn.bias_add(conv, biases)
    conv2_1 = tf.nn.relu(out)
# CONVOLUTION 2 - 2
with tf.name_scope('conv2_2'):
    filter2_2 = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
                                                stddev=1e-1), name='weights2_2')
    conv = tf.nn.conv2d(conv2_1, filter2_2, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                         trainable=True, name='biases2_2')
    out = tf.nn.bias_add(conv, biases)
    conv2_2 = tf.nn.relu(out)
# POOL 2
with tf.name_scope('pool2'):
    pool2_1 = tf.nn.max_pool(conv2_2,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='pool2_1')
    pool2_1_drop = tf.nn.dropout(pool2_1, keepRate1)
#FULLY CONNECTED 1
with tf.name_scope('fc1') as scope:
    shape = int(np.prod(pool2_1_drop.get_shape()[1:]))
    fc1w = tf.Variable(tf.truncated_normal([shape, 512], dtype=tf.float32,
                                           stddev=1e-1), name='weights3_1')
    fc1b = tf.Variable(tf.constant(1.0, shape=[512], dtype=tf.float32),
                       trainable=True, name='biases3_1')
    pool2_flat = tf.reshape(pool2_1_drop, [-1, shape])
    out = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)
    fc1 = tf.nn.relu(out)
    fc1_drop = tf.nn.dropout(fc1, keepRate2)
#FULLY CONNECTED 3 & SOFTMAX OUTPUT
with tf.name_scope('softmax') as scope:
    fc2w = tf.Variable(tf.truncated_normal([512, classes], dtype=tf.float32,
                                           stddev=1e-1), name='weights3_2')
    fc2b = tf.Variable(tf.constant(1.0, shape=[classes], dtype=tf.float32),
                       trainable=True, name='biases3_2')
    Ylogits = tf.nn.bias_add(tf.matmul(fc1_drop, fc2w), fc2b)
    Y = tf.nn.softmax(Ylogits)
numEpochs = 400
batchSize = 30
alpha = 1e-5
with tf.name_scope('cross_entropy'):
    print(Ylogits.shape)
    print(Y.shape)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
    loss = tf.reduce_mean(cross_entropy)
with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(learning_rate=alpha).minimize(loss)
#Create Session and insert variables
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
The tensor shape (?, 30) means the batch size is not fixed, so you can feed batches of any size to the graph. The downside is that you can then run into exactly this kind of problem and have to keep track of the tensor shapes yourself.
What you need to fix: you have 30 images in one batch but only 1 label per batch, and you cannot compute a loss for 30 images with a single label. Either decrease the number of images to 1 or increase the label batch size to 30. It could also be that you are reshaping the tensors incorrectly somewhere.
I would look at where you read your data in and then batch it; that is most likely where the problem is, or at the places where you reshape.
If you post your entire code, it will be easier to help.
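As a concrete illustration: assuming y_train holds integer class indices (which the .reshape(1,30) in the training loop suggests), a minimal sketch of building a one-hot label batch of shape (batchSize, classes) could look like this. to_one_hot is a hypothetical helper, not part of the tutorial:
import numpy as np

def to_one_hot(labels, classes=30):
    # One row per label, matching the logits shape (batchSize, classes)
    one_hot = np.zeros((len(labels), classes), dtype=np.float32)
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot

# In the training loop, instead of .reshape(1, 30):
# Y_batch = to_one_hot(y_train[j*batchSize:(j+1)*batchSize])  # shape (30, 30)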
Related
I am building a convolutional neural network for classifying MNIST data. I am using 2 conv layers and 2 fully connected layers.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def _net_params():
    weights = {
        'conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
        'conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
        'fc1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
        'fc2': tf.Variable(tf.random_normal([1024, 10])),
    }
    biases = {
        'conv1': tf.Variable(tf.random_normal([32]), tf.float32),
        'conv2': tf.Variable(tf.random_normal([64]), tf.float32),
        'fc1': tf.Variable(tf.random_normal([1024]), tf.float32),
        'fc2': tf.Variable(tf.random_normal([10]), tf.float32),
    }
    return weights, biases

def _fc_layer(inputs, weights, biases):
    return tf.add(tf.matmul(inputs, weights), biases)

def _conv_layer(inputs, weights, biases, stride=1, padding='SAME'):
    layer = tf.nn.conv2d(input=inputs, filter=weights,
                         strides=[1, stride, stride, 1], padding=padding)
    layer = tf.nn.bias_add(layer, biases)
    return tf.nn.relu(layer)

def pool_layer(inputs):
    pool = tf.nn.max_pool(inputs, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding="SAME")
    return pool

def conv_net(x):
    weights, biases = _net_params()
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    # Conv layers
    conv1 = _conv_layer(x, weights['conv1'], biases['conv1'])
    pool1 = pool_layer(conv1)
    conv2 = _conv_layer(pool1, weights['conv2'], biases['conv2'])
    pool2 = pool_layer(conv2)
    flattened = tf.reshape(pool2, [-1, 7 * 7 * 64])
    fc1 = _fc_layer(flattened, weights['fc1'], biases['fc1'])
    fc1 = tf.nn.relu(fc1)
    fc2 = _fc_layer(fc1, weights['fc2'], biases['fc2'])
    return fc2

def _training():
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    learning_rate_ = tf.placeholder(tf.float32)
    pred = conv_net(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_))
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate_).minimize(cost)
    # optimizer = tf.train.GradientDescentOptimizer(
    #     learning_rate=learning_rate_).minimize(cost)
    correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    return x, y_, learning_rate_, optimizer, cost, accuracy

def main():
    mnist = input_data.read_data_sets('tmp/data', one_hot=True)
    n_epochs = 3
    batch_size = 200
    learning_rate = 0.005
    x, y_, learning_rate_, optimizer, cost, accuracy = _training()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        current_epoch = 0
        while current_epoch < n_epochs:
            current_epoch += 1
            print('epoch %s' % (current_epoch,))
            current_batch = 1
            while current_batch * batch_size <= len(mnist.train.images):
                current_batch += 1
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                sess.run(fetches=optimizer, feed_dict={
                    x: batch_x, y_: batch_y, learning_rate_: learning_rate})
                if current_batch % 75 == 0:
                    loss, acc = sess.run([cost, accuracy], feed_dict={
                        x: batch_x, y_: batch_y, learning_rate_: 0.})
                    print(' batch %s: batch_loss=%s, training_accuracy=%s'
                          % (current_batch, loss, acc))
        print('Training complete !')
        print('Final accuracy is %s' % sess.run(accuracy, feed_dict={
            x: mnist.test.images, y_: mnist.test.labels,
            learning_rate_: 0.}))

if __name__ == '__main__':
    main()
(There might be some indentation errors from putting this code here on Stack Overflow.)
When I use AdamOptimizer, I get accuracy above 95%.
[screenshot: accuracy for AdamOptimizer]
But when I use GradientDescentOptimizer, I get an accuracy of about 10%.
[screenshot: accuracy for GradientDescentOptimizer]
Do you know why I am getting this lower accuracy, and how to fix it if I want to use GradientDescentOptimizer?
Thanks
I am looking at the examples/image_ocr.py example in Keras; when I run it I see something like
____________________________________________________________________________________________________
max2 (MaxPooling2D)              (None, 32, 16, 16)    0           conv2[0][0]
____________________________________________________________________________________________________
reshape (Reshape)                (None, 32, 256)       0           max2[0][0]
____________________________________________________________________________________________________
dense1 (Dense)                   (None, 32, 32)        8224        reshape[0][0]
____________________________________________________________________________________________________
The Dense layer outputs a 32x32 tensor. I am trying to replicate this in pure TensorFlow, where tf.matmul would be used, but how can I output 32x32 using matmul?
Addition:
I am not trying to replicate the Keras example exactly,
import numpy as np  # needed for np.zeros below
import tensorflow as tf

w = 128; h = 64
# junk image, only one
dataset = np.zeros((1,w,h,1))
pool_size = 1
num_filters = 16

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

inputs = tf.placeholder(tf.float32, [None, w, h, 1])
W_conv1 = weight_variable([3, 3, 1, num_filters])
b_conv1 = bias_variable([num_filters])
h_conv1 = tf.nn.relu(conv2d(inputs, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([3, 3, num_filters, num_filters])
b_conv2 = bias_variable([num_filters])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2_flat = tf.reshape(h_pool2, [-1, 32, 256])
W_fc1 = weight_variable([256, 32])
b_fc1 = bias_variable([32])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
print inputs.shape
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(h_pool2_flat, feed_dict={inputs: dataset})
    print 'output', output.shape
And I get
ValueError: Shape must be rank 2 but is rank 3 for 'MatMul_5' (op: 'MatMul') with input shapes: [?,32,256], [256,32].
A smaller example
import numpy as np
import tensorflow as tf
dataset = np.zeros((3,2,4))
inputs = tf.placeholder(tf.float32, [None, 2, 4])
print inputs
W = tf.zeros((4,5))
print W
W2 = tf.matmul(inputs, W)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(W2, feed_dict={inputs: dataset})
    print 'output', output.shape
This also gives a similar error:
ValueError: Shape must be rank 2 but is rank 3 for 'MatMul_12' (op: 'MatMul') with input shapes: [?,2,4], [4,5].
Any ideas?
Thanks,
That 32 is there because it was in the previous layer; it stays unchanged.
tf.matmul multiplies over the last two dimensions, as stated here (see the examples taking more than two dimensions).
I see you've got a Dense(32) there, with input size = 256.
This means that the weights matrix is (256, 32). In Keras, the multiplication, as seen here, is inputs x kernel.
So, if you have the input tensor shaped as (?, any, 256) and the weights matrix shaped as (256, 32), all you need is:
output = tf.matmul(input, weights)
This will output a shape of (?, any, 32); any stays untouched because it was just there before.
You may also want to add the biases, which follow the same principle. You need a bias vector of shape (32,).
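If your TensorFlow version rejects a rank-3 by rank-2 matmul (which the ValueError above suggests), a minimal sketch of the usual workaround, assuming the (?, 32, 256) input from the question, is to flatten the leading dimensions, multiply, and reshape back:
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 32, 256])
weights = tf.Variable(tf.truncated_normal([256, 32], stddev=0.1))
# Collapse (?, 32, 256) -> (?*32, 256), multiply, then restore the batch dims
flat = tf.reshape(inputs, [-1, 256])
output = tf.reshape(tf.matmul(flat, weights), [-1, 32, 32])  # (?, 32, 32)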
This is a classification model for ten categories of pictures. My code has three files: the CNN model convNet.py, read_TFRecord.py to read data, and train.py to train and evaluate the model. The training set has 80,000 samples and the validation set has 20,000.
Question:
In the first epoch:
training loss = 2.11, train accuracy = 25.61%
validation loss = 3.05, validation accuracy = 8.29%
Why is the validation loss so different right from the start? And why is the validation accuracy always below 10%?
In the 10 epochs of training:
The training process learns normally, but the validation loss increases slowly and the validation accuracy stays stuck around 10%. Is it over-fitting? But I have taken some measures, such as adding regularization losses and dropout. I do not know where the problem is. I hope you can help me.
convNet.py:
# imports implied by the code below (learn is tf.contrib.learn)
import tensorflow as tf
from tensorflow.contrib import learn


def convNet(features, mode):
    input_layer = tf.reshape(features, [-1, 100, 100, 3])
    tf.summary.image('input', input_layer)
    # conv1
    with tf.name_scope('conv1'):
        conv1 = tf.layers.conv2d(
            inputs=input_layer,
            filters=32,
            kernel_size=5,
            padding="same",
            activation=tf.nn.relu,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
            name='conv1'
        )
        conv1_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'conv1')
        tf.summary.histogram('kernel', conv1_vars[0])
        tf.summary.histogram('bias', conv1_vars[1])
        tf.summary.histogram('act', conv1)
    # pool1 100->50
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, name='pool1')
    # dropout
    pool1_dropout = tf.layers.dropout(
        inputs=pool1, rate=0.5, training=tf.equal(mode, learn.ModeKeys.TRAIN), name='pool1_dropout')
    # conv2
    with tf.name_scope('conv2'):
        conv2 = tf.layers.conv2d(
            inputs=pool1_dropout,
            filters=64,
            kernel_size=5,
            padding="same",
            activation=tf.nn.relu,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
            name='conv2'
        )
        conv2_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'conv2')
        tf.summary.histogram('kernel', conv2_vars[0])
        tf.summary.histogram('bias', conv2_vars[1])
        tf.summary.histogram('act', conv2)
    # pool2 50->25
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, name='pool2')
    # dropout
    pool2_dropout = tf.layers.dropout(
        inputs=pool2, rate=0.5, training=tf.equal(mode, learn.ModeKeys.TRAIN), name='pool2_dropout')
    # conv3
    with tf.name_scope('conv3'):
        conv3 = tf.layers.conv2d(
            inputs=pool2_dropout,
            filters=128,
            kernel_size=3,
            padding="same",
            activation=tf.nn.relu,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
            name='conv3'
        )
        conv3_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'conv3')
        tf.summary.histogram('kernel', conv3_vars[0])
        tf.summary.histogram('bias', conv3_vars[1])
        tf.summary.histogram('act', conv3)
    # pool3 25->12
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2, name='pool3')
    # dropout
    pool3_dropout = tf.layers.dropout(
        inputs=pool3, rate=0.5, training=tf.equal(mode, learn.ModeKeys.TRAIN), name='pool3_dropout')
    # conv4
    with tf.name_scope('conv4'):
        conv4 = tf.layers.conv2d(
            inputs=pool3_dropout,
            filters=128,
            kernel_size=3,
            padding="same",
            activation=tf.nn.relu,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
            name='conv4'
        )
        conv4_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'conv4')
        tf.summary.histogram('kernel', conv4_vars[0])
        tf.summary.histogram('bias', conv4_vars[1])
        tf.summary.histogram('act', conv4)
    # pool4 12->6
    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2, name='pool4')
    # dropout
    pool4_dropout = tf.layers.dropout(
        inputs=pool4, rate=0.5, training=tf.equal(mode, learn.ModeKeys.TRAIN), name='pool4_dropout')
    pool4_flat = tf.reshape(pool4_dropout, [-1, 6 * 6 * 128])
    # fc1
    with tf.name_scope('fc1'):
        fc1 = tf.layers.dense(inputs=pool4_flat, units=1024, activation=tf.nn.relu,
                              kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                              kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                              name='fc1')
        fc1_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'fc1')
        tf.summary.histogram('kernel', fc1_vars[0])
        tf.summary.histogram('bias', fc1_vars[1])
        tf.summary.histogram('act', fc1)
    # dropout
    fc1_dropout = tf.layers.dropout(
        inputs=fc1, rate=0.3, training=tf.equal(mode, learn.ModeKeys.TRAIN), name='fc1_dropout')
    # fc2
    with tf.name_scope('fc2'):
        fc2 = tf.layers.dense(inputs=fc1_dropout, units=512, activation=tf.nn.relu,
                              kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                              kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                              name='fc2')
        fc2_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'fc2')
        tf.summary.histogram('kernel', fc2_vars[0])
        tf.summary.histogram('bias', fc2_vars[1])
        tf.summary.histogram('act', fc2)
    # dropout
    fc2_dropout = tf.layers.dropout(
        inputs=fc2, rate=0.3, training=tf.equal(mode, learn.ModeKeys.TRAIN), name='fc2_dropout')
    # logits
    with tf.name_scope('out'):
        logits = tf.layers.dense(inputs=fc2_dropout, units=10, activation=None,
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                 kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                                 name='out')
        out_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'out')
        tf.summary.histogram('kernel', out_vars[0])
        tf.summary.histogram('bias', out_vars[1])
        tf.summary.histogram('act', logits)
    return logits
read_TFRecord.py:
import tensorflow as tf


def read_and_decode(filename, width, height, channel):
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [width, height, channel])
    img = tf.cast(img, tf.float16) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int16)
    return img, label
train.py:
# imports implied by the three-file layout described above
import tensorflow as tf
from tensorflow.contrib import learn
from convNet import convNet
from read_TFRecord import read_and_decode

# step 1
TRAIN_TFRECORD = 'F:/10-image-set2/train.tfrecords'  # train data set
VAL_TFRECORD = 'F:/10-image-set2/val.tfrecords'  # validation data set
WIDTH = 100  # image width
HEIGHT = 100  # image height
CHANNEL = 3  # image channel
TRAIN_BATCH_SIZE = 64
VAL_BATCH_SIZE = 16
train_img, train_label = read_and_decode(TRAIN_TFRECORD, WIDTH, HEIGHT, CHANNEL)
val_img, val_label = read_and_decode(VAL_TFRECORD, WIDTH, HEIGHT, CHANNEL)
x_train_batch, y_train_batch = tf.train.shuffle_batch(
    [train_img, train_label], batch_size=TRAIN_BATCH_SIZE,
    capacity=80000, min_after_dequeue=79999,
    num_threads=64, name='train_shuffle_batch')
x_val_batch, y_val_batch = tf.train.shuffle_batch(
    [val_img, val_label], batch_size=VAL_BATCH_SIZE,
    capacity=20000, min_after_dequeue=19999,
    num_threads=64, name='val_shuffle_batch')
# step 2
x = tf.placeholder(tf.float32, shape=[None, WIDTH, HEIGHT, CHANNEL], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')
mode = tf.placeholder(tf.string, name='mode')
step = tf.get_variable(shape=(), dtype=tf.int32, initializer=tf.zeros_initializer(), name='step')
tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, step)
logits = convNet(x, mode)
with tf.name_scope('Reg_losses'):
    reg_losses = tf.cond(tf.equal(mode, learn.ModeKeys.TRAIN),
                         lambda: tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
                         lambda: tf.constant(0, dtype=tf.float32))
with tf.name_scope('Loss'):
    loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits) + reg_losses
train_op = tf.train.AdamOptimizer().minimize(loss, step)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
with tf.name_scope('Accuracy'):
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# step 3
tf.summary.scalar("reg_losses", reg_losses)
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", acc)
merged = tf.summary.merge_all()
# step 4
with tf.Session() as sess:
    summary_dir = './logs/summary/'
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver = tf.train.Saver(max_to_keep=1)
    train_writer = tf.summary.FileWriter(summary_dir + 'train', sess.graph)
    valid_writer = tf.summary.FileWriter(summary_dir + 'valid')
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    max_acc = 0
    MAX_EPOCH = 10
    for epoch in range(MAX_EPOCH):
        # training
        train_step = int(80000 / TRAIN_BATCH_SIZE)
        train_loss, train_acc = 0, 0
        for step in range(epoch * train_step, (epoch + 1) * train_step):
            x_train, y_train = sess.run([x_train_batch, y_train_batch])
            train_summary, _, err, ac = sess.run([merged, train_op, loss, acc],
                                                 feed_dict={x: x_train, y_: y_train,
                                                            mode: learn.ModeKeys.TRAIN,
                                                            global_step: step})
            train_loss += err
            train_acc += ac
            if (step + 1) % 50 == 0:
                train_writer.add_summary(train_summary, step)
        print("Epoch %d,train loss= %.2f,train accuracy=%.2f%%" % (
            epoch, (train_loss / train_step), (train_acc / train_step * 100.0)))
        # validation
        val_step = int(20000 / VAL_BATCH_SIZE)
        val_loss, val_acc = 0, 0
        for step in range(epoch * val_step, (epoch + 1) * val_step):
            x_val, y_val = sess.run([x_val_batch, y_val_batch])
            val_summary, err, ac = sess.run([merged, loss, acc],
                                            feed_dict={x: x_val, y_: y_val,
                                                       mode: learn.ModeKeys.EVAL,
                                                       global_step: step})
            val_loss += err
            val_acc += ac
            if (step + 1) % 50 == 0:
                valid_writer.add_summary(val_summary, step)
        print("Epoch %d,validation loss= %.2f,validation accuracy=%.2f%%" % (
            epoch, (val_loss / val_step), (val_acc / val_step * 100.0)))
        # save model
        if val_acc > max_acc:
            max_acc = val_acc
            saver.save(sess, summary_dir + '/10-image.ckpt', epoch)
            print("model saved")
    coord.request_stop()
    coord.join(threads)
Tensorboard result (orange is train, blue is validation):
[screenshots: accuracy, loss, reg_losses, and the conv1-conv4, fc1, fc2, output histogram panels]
My data:
[screenshot: train and validation sample images]
I doubt this is an issue of overfitting: the losses are significantly different right from the start and diverge further well before you get through your first epoch (~500 batches). Without seeing your dataset it's difficult to say more, though as a first step I'd encourage you to visualize both training and evaluation input data to make sure the issue isn't something there. The fact that you get significantly less than 10% on a 10-class classification problem initially indicates you almost certainly have something wrong here.
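As a quick sanity check, a sketch along those lines, run inside the existing session after the queue runners have started (assuming matplotlib is available):
import matplotlib.pyplot as plt

# Pull one training batch and eyeball a few images against their labels
imgs, labels = sess.run([x_train_batch, y_train_batch])
plt.imshow(imgs[0].astype('float32') + 0.5)  # undo the -0.5 shift from read_and_decode
plt.title('label: %d' % labels[0])
plt.show()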
Having said that, you will likely run into problems with overfitting using this model because, despite what you may think, you aren't using dropout or regularization.
Dropout: mode == learn.ModeKeys.TRAIN is always False if mode is a tensor, so you're never using dropout. You could use tf.equal(mode, learn.ModeKeys.TRAIN), but I think you'd be much better off passing a training bool tensor to your convNet and feeding in the appropriate value.
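For instance, a minimal sketch of that suggestion (is_training is an illustrative name, not something in the posted code):
# Feed an explicit boolean instead of comparing mode strings
is_training = tf.placeholder(tf.bool, name='is_training')
fc1_dropout = tf.layers.dropout(inputs=fc1, rate=0.3, training=is_training,
                                name='fc1_dropout')
# ...then feed_dict={..., is_training: True} during training, False for eval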
Regularization: you're creating the regularization loss terms and they're being added to the tf.GraphKeys.REGULARIZATION_LOSSES collection, but the loss you're minimizing doesn't use them. Add the following:
loss += tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
before you optimize.
A note on the optimization step: you shouldn't be feeding a value into the session run like you are. Every time you run the optimization operation it will update the step variable you passed when you created it, so just create it as an int variable and leave it alone. See the following example code:
import tensorflow as tf
x = tf.get_variable(shape=(4, 3), dtype=tf.float32,
initializer=tf.random_normal_initializer(), name='x')
loss = tf.nn.l2_loss(x)
step = tf.get_variable(shape=(), dtype=tf.int32,
initializer=tf.zeros_initializer(), name='step')
tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, step) # good practice
opt = tf.train.AdamOptimizer().minimize(loss, step)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step_val = sess.run(step)
    print(step_val)  # 0
    sess.run(opt)
    step_val = sess.run(step)
    print(step_val)  # 1
I am trying to use TensorFlow to classify some object representations. I used the same architecture as in the TensorFlow CIFAR-10 example, with the last layer defined as:
with tf.variable_scope('sigmoid_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, num_classes],
                                          stddev=1 / 192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [num_classes], initializer)
    sigmoid_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
    _activation_summary(sigmoid_linear)
    return sigmoid_linear
In my case, num_classes is 2, and the number of channels in the representation fed to the neural network is 8. Furthermore, I'm currently debugging with only 5 examples. The output of the last layer has a shape of [40, 2]. I expected the first dimension to be due to 5 examples * 8 channels and the second to be due to the number of classes.
In order to compare the logits and the labels using e.g. tensorflow.nn.SparseSoftmaxCrossEntropyWithLogits, I need them to have a common shape. How can I interpret the current content of the logits in the current shape, and how can I reduce the first dimension of the logits to be the same as num_classes?
Edit: the input to the inference function has a shape of [5, 101, 1008, 8]. The inference function is defined as:
def inference(representations):
    """Build the model.
    Args:
        STFT spectra: spectra returned from distorted_inputs() or inputs().
    Returns:
        Logits.
    """
    # conv1
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, nChannels, 64],
                                             stddev=5e-2,
                                             wd=0.0)
        conv = tf.nn.conv2d(representations, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], initializer)
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv1)
    # pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    # norm1
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')
    # conv2
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 64, 64],
                                             stddev=5e-2,
                                             wd=0.0)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], initializer)
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv2)
    # norm2
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    # local3
    with tf.variable_scope('local3') as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        reshape = tf.reshape(pool2, [batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [384], initializer)
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        _activation_summary(local3)
    # local4
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], initializer)
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        _activation_summary(local4)
    with tf.variable_scope('sigmoid_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, num_classes],
                                              stddev=1 / 192.0, wd=0.0)
        biases = _variable_on_cpu('biases', [num_classes], initializer)
        sigmoid_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        _activation_summary(sigmoid_linear)
    return sigmoid_linear
After more debugging I found the problem. The posted code with the layers, originally from the TensorFlow tutorial, works well (of course it does). I printed all shapes after each layer and found that the number 40 was not due to 5 examples * 8 channels; rather, I had previously set batch_size = 40, which was also higher than the number of training examples. The mismatch began after the reshaping in the local3 layer. The question can now be closed.
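For reference, a small sketch of one way to avoid hard-coding batch_size in the flatten (my illustration, not the tutorial's code; it assumes numpy is imported as np and the non-batch dimensions are statically known):
# Infer the flattened feature size from the static shape and leave the
# batch dimension dynamic (-1) instead of hard-coding batch_size
dim = int(np.prod(pool2.get_shape().as_list()[1:]))
reshape = tf.reshape(pool2, [-1, dim])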
I am trying to generate an unrecognizable image that can fool VGGNet. I used the following VGG model for TensorFlow and added some modifications for calculating the gradient. In the ending part you can see my modification for calculating the gradient with respect to the given image (is it correct? I am trying to generate an image to which VGGNet assigns a high probability for class 1). With this gradient I update the random image to fool VGGNet, but this has not been very successful: I can't generate an image with high probability, and the maximum probability I got is around 0.001. How can I make it keep increasing?
VGGNet model
########################################################################################
# Davi Frossard, 2016                                                                  #
# VGG16 implementation in TensorFlow                                                   #
# Details:                                                                             #
# http://www.cs.toronto.edu/~frossard/post/vgg16/                                      #
#                                                                                      #
# Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md     #
# Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow      #
########################################################################################
import tensorflow as tf
import numpy as np
from scipy.misc import imread, imresize
from imagenet_classes import class_names
class vgg16:
    def __init__(self, imgs, weights=None, sess=None):
        self.imgs = imgs
        self.convlayers()
        self.fc_layers()
        self.probs = tf.nn.softmax(self.fc3l, name='prob')
        if weights is not None and sess is not None:
            self.load_weights(weights, sess)

    def convlayers(self):
        self.parameters = []
        # zero-mean input
        with tf.name_scope('preprocess') as scope:
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
            images = self.imgs - mean
        # conv1_1
        with tf.name_scope('conv1_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv1_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv1_2
        with tf.name_scope('conv1_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv1_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # pool1
        self.pool1 = tf.nn.max_pool(self.conv1_2,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool1')
        # conv2_1
        with tf.name_scope('conv2_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv2_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv2_2
        with tf.name_scope('conv2_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv2_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # pool2
        self.pool2 = tf.nn.max_pool(self.conv2_2,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool2')
        # conv3_1
        with tf.name_scope('conv3_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv3_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv3_2
        with tf.name_scope('conv3_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv3_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv3_3
        with tf.name_scope('conv3_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv3_3 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # pool3
        self.pool3 = tf.nn.max_pool(self.conv3_3,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool3')
        # conv4_1
        with tf.name_scope('conv4_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv4_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv4_2
        with tf.name_scope('conv4_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv4_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv4_3
        with tf.name_scope('conv4_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv4_3 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # pool4
        self.pool4 = tf.nn.max_pool(self.conv4_3,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool4')
        # conv5_1
        with tf.name_scope('conv5_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv5_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv5_2
        with tf.name_scope('conv5_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv5_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # conv5_3
        with tf.name_scope('conv5_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv5_3 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]
        # pool5
        self.pool5 = tf.nn.max_pool(self.conv5_3,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool4')

    def fc_layers(self):
        # fc1
        with tf.name_scope('fc1') as scope:
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                               trainable=True, name='biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]
        # fc2
        with tf.name_scope('fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                               trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]
        # fc3
        with tf.name_scope('fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                               trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]
        ###################### Modified part ######################
        with tf.name_scope('grad') as scope:
            temp = np.zeros(1000)
            temp[0] = 1
            vec = tf.constant(temp, dtype='float32', name='goal')
            loss = tf.reduce_mean(tf.square(tf.sub(tf.nn.softmax(self.fc3l), vec)))
            self.grad = tf.gradients(loss, self.imgs)[-1]
        ############################################################

    def load_weights(self, weight_file, sess):
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            print i, k, np.shape(weights[k])
            sess.run(self.parameters[i].assign(weights[k]))
Create session
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
sess = tf.Session()
imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess)
Generate a new image for fooling the network
imarray = np.random.rand(224,224,3) * 255
imarray = imarray.astype('float32')
feed_dict = {vgg.imgs: [imarray]}
prob_list = []
prob_list.append(sess.run(vgg.probs, feed_dict={vgg.imgs: [imarray]})[0][0])
lamda = 0.1
#mean = np.array([123.68, 116.779, 103.939])
print 'start'
for i in range(1000):
    rst = sess.run(vgg.grad, feed_dict)
    imarray -= lamda * (rst[0]*255)
    feed_dict = {vgg.imgs: [imarray]}
    prob_list.append(sess.run(vgg.probs, feed_dict={vgg.imgs: [imarray]})[0][0])
I'm surprised that the shapes of the gradient and the image match.
You are taking the derivative of the loss with respect to the parameters; it should be with respect to the image placeholder. Excuse me if I'm missing something obvious; I can't run the code right now.
The computation of the loss is based on fc3l, while the final output is probs. I don't see where probs is computed in the VGG code; maybe there are layers in between. You could plot the first component of fc3l instead and see if that goes up.
You should probably base the loss on probs.
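For what it's worth, a minimal sketch of that last suggestion as a drop-in for the 'Modified part' above (my illustration, untested against the full model; it assumes a batch of one image, as in the question):
with tf.name_scope('grad') as scope:
    probs = tf.nn.softmax(self.fc3l)
    loss = -tf.log(probs[0, 0] + 1e-8)  # negative log-probability of class 1
    self.grad = tf.gradients(loss, self.imgs)[0]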