MNIST Classification: low accuracy (10%) and no change in loss - tensorflow

I'm learning TensorFlow and tried to apply it to the MNIST database.
My question (see attached image) is: what could cause such output for the accuracy (improving and then degrading!) and the loss (almost constant!)? The accuracy isn't great, just hovering around 10%, despite:
a 5-layer network (including the output layer) with 200/100/60/30/10 neurons respectively, and
a 0.1 learning rate (which is quite high, I believe).
Is the network not learning at all?
Full code: https://github.com/vibhorj/tf > mnist-2.py
1) Here's how the layers are defined:
K, L, M, N = 200, 100, 60, 30
""" Layer 1 """
with tf.name_scope('L1'):
    w1 = tf.Variable(initial_value=tf.truncated_normal([28*28, K], mean=0, stddev=0.1), name='w1')
    b1 = tf.Variable(initial_value=tf.truncated_normal([K], mean=0, stddev=0.1), name='b1')
""" Layer 2 """
with tf.name_scope('L2'):
    w2 = tf.Variable(initial_value=tf.truncated_normal([K, L], mean=0, stddev=0.1), name='w2')
    b2 = tf.Variable(initial_value=tf.truncated_normal([L], mean=0, stddev=0.1), name='b2')
""" Layer 3 """
with tf.name_scope('L3'):
    w3 = tf.Variable(initial_value=tf.truncated_normal([L, M], mean=0, stddev=0.1), name='w3')
    b3 = tf.Variable(initial_value=tf.truncated_normal([M], mean=0, stddev=0.1), name='b3')
""" Layer 4 """
with tf.name_scope('L4'):
    w4 = tf.Variable(initial_value=tf.truncated_normal([M, N], mean=0, stddev=0.1), name='w4')
    b4 = tf.Variable(initial_value=tf.truncated_normal([N], mean=0, stddev=0.1), name='b4')
""" Layer output """
with tf.name_scope('L_out'):
    w_out = tf.Variable(initial_value=tf.truncated_normal([N, 10], mean=0, stddev=0.1), name='w_out')
    b_out = tf.Variable(initial_value=tf.truncated_normal([10], mean=0, stddev=0.1), name='b_out')
2) Loss function:
Y1 = tf.nn.sigmoid(tf.add(tf.matmul(X, w1), b1), name='Y1')
Y2 = tf.nn.sigmoid(tf.add(tf.matmul(Y1, w2), b2), name='Y2')
Y3 = tf.nn.sigmoid(tf.add(tf.matmul(Y2, w3), b3), name='Y3')
Y4 = tf.nn.sigmoid(tf.add(tf.matmul(Y3, w4), b4), name='Y4')
Y_pred_logits = tf.add(tf.matmul(Y4, w_out), b_out, name='logits')
Y_pred_prob = tf.nn.softmax(Y_pred_logits, name='probs')
error = -tf.matmul(Y, tf.reshape(tf.log(Y_pred_prob), [10, -1]), name='err')
loss = tf.reduce_mean(error, name='loss')
3) Optimization function:
opt = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = opt.compute_gradients(loss)
ctr = tf.Variable(0.0, name='ctr')
z = opt.apply_gradients(grads_and_vars, global_step=ctr)
4) TensorBoard code:
evt_file = tf.summary.FileWriter('/Users/vibhorj/python/-tf/g_mnist')
evt_file.add_graph(tf.get_default_graph())
s1 = tf.summary.scalar(name='accuracy', tensor=accuracy)
s2 = tf.summary.scalar(name='loss', tensor=loss)
m1 = tf.summary.merge([s1,s2])
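(The accuracy tensor logged above isn't defined in the snippet; a minimal sketch of a typical definition, assuming Y holds the one-hot labels, would be:)
# assumed definition, not shown in the original snippet:
# fraction of examples whose predicted class matches the label
correct = tf.equal(tf.argmax(Y_pred_prob, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')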
5) Run the session (the test data is mnist.test.images & mnist.test.labels):
with tf.Session() as sess:
    sess.run(tf.variables_initializer(tf.global_variables()))
    for i in range(300):
        """ calc. accuracy on test data - TENSORBOARD before iteration begins """
        summary = sess.run(m1, feed_dict=test_data)
        evt_file.add_summary(summary, sess.run(ctr))
        evt_file.flush()
        """ fetch train data """
        a_train, b_train = mnist.train.next_batch(batch_size=100)
        train_data = {X: a_train, Y: b_train}
        """ train """
        sess.run(z, feed_dict=train_data)
I'd appreciate your time in providing any insight into this. I'm completely clueless how to proceed further (I even tried initializing w & b with random_normal, and played with learning rates [0.1, 0.01, 0.001]).
Cheers!

Please consider:
initializing the biases to zeros;
using ReLU units instead of sigmoid, to avoid saturation;
using the Adam optimizer for faster learning (see the sketch after the code below).
I also feel that your network is quite large; you could do with a smaller one.
K, L, M, N = 200, 100, 60, 30
""" Layer 1 """
with tf.name_scope('L1'):
    w1 = tf.Variable(initial_value=tf.truncated_normal([28*28, K], mean=0, stddev=0.1), name='w1')
    b1 = tf.Variable(tf.zeros([K]), name='b1')  # biases start at zero but remain trainable
""" Layer 2 """
with tf.name_scope('L2'):
    w2 = tf.Variable(initial_value=tf.truncated_normal([K, L], mean=0, stddev=0.1), name='w2')
    b2 = tf.Variable(tf.zeros([L]), name='b2')
""" Layer 3 """
with tf.name_scope('L3'):
    w3 = tf.Variable(initial_value=tf.truncated_normal([L, M], mean=0, stddev=0.1), name='w3')
    b3 = tf.Variable(tf.zeros([M]), name='b3')
""" Layer 4 """
with tf.name_scope('L4'):
    w4 = tf.Variable(initial_value=tf.truncated_normal([M, N], mean=0, stddev=0.1), name='w4')
    b4 = tf.Variable(tf.zeros([N]), name='b4')
""" Layer output """
with tf.name_scope('L_out'):
    w_out = tf.Variable(initial_value=tf.truncated_normal([N, 10], mean=0, stddev=0.1), name='w_out')
    b_out = tf.Variable(tf.zeros([10]), name='b_out')
Y1 = tf.nn.relu(tf.add(tf.matmul(X,w1),b1), name='Y1')
Y2 = tf.nn.relu(tf.add(tf.matmul(Y1,w2),b2), name='Y2')
Y3 = tf.nn.relu(tf.add(tf.matmul(Y2,w3),b3), name='Y3')
Y4 = tf.nn.relu(tf.add(tf.matmul(Y3,w4),b4), name='Y4')
Y_pred_logits = tf.add(tf.matmul(Y4, w_out),b_out,name='logits')
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=Y_pred_logits, name='xentropy'))
opt = tf.train.GradientDescentOptimizer(0.01)
grads_and_vars = opt.compute_gradients(loss)
ctr = tf.Variable(0.0, name='ctr', trainable=False)
train_op = opt.minimize(loss, global_step=ctr)
for v in tf.trainable_variables():
    print(v.op.name)
with tf.Session() as sess:
    sess.run(tf.variables_initializer(tf.global_variables()))
    for i in range(3000):
        """ calc. accuracy on test data - TENSORBOARD before iteration begins """
        #summary = sess.run(m1, feed_dict=test_data)
        #evt_file.add_summary(summary, sess.run(ctr))
        #evt_file.flush()
        """ fetch train data """
        a_train, b_train = mnist.train.next_batch(batch_size=100)
        train_data = {X: a_train, Y: b_train}
        """ train """
        l = sess.run(loss, feed_dict=train_data)
        print(l)
        sess.run(train_op, feed_dict=train_data)
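The list above recommends Adam, but this snippet still uses plain gradient descent; a minimal sketch of the swap (same loss and ctr as above; the 0.001 learning rate is a common default, not taken from the original post):
# drop-in replacement for GradientDescentOptimizer(0.01)
opt = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = opt.minimize(loss, global_step=ctr)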

Related

Range of testing accuracy is large in CNN

I have trained my model to above 0.8 accuracy on the training data.
But the testing accuracy is sometimes 0.7 and sometimes just 0.2.
(My training dataset and testing dataset are the same.)
What's wrong with my CNN model?
Thank you.
This is my training model:
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, LeakyReLU, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model

inputs = tf.keras.layers.Input(shape=(64,64,3))
conv_layer1 = Conv2D(32,(3,3),padding='same')(inputs)
LR1 = LeakyReLU(alpha=0.3)(conv_layer1)
conv_layer2 = Conv2D(32,(3,3),padding='same')(LR1)
LR2 = LeakyReLU(alpha=0.3)(conv_layer2)
max_layer1 = MaxPooling2D(pool_size=(2,2))(LR2)
conv_layer3 = Conv2D(64,(3,3),padding='same')(max_layer1)
LR3 = LeakyReLU(alpha=0.3)(conv_layer3)
conv_layer4 = Conv2D(64,(3,3),padding='same')(LR3)
LR4 = LeakyReLU(alpha=0.3)(conv_layer4)
max_layer2 = MaxPooling2D(pool_size=(2,2))(LR4)
conv_layer5 = Conv2D(128,(3,3),padding='same',activation='relu')(max_layer2)
max_layer3 = MaxPooling2D(pool_size=(2,2))(conv_layer5)
conv_layer6 = Conv2D(256,(3,3),padding='same',activation='relu')(max_layer3)
max_layer4 = MaxPooling2D(pool_size=(2,2))(conv_layer6)
flatten = Flatten()(max_layer4)
dence2 = Dense(64,activation='relu')(flatten)
f1 = Dense(11, activation='softmax', name='prediction_one')(dence2)
f2 = Dense(11, activation='softmax', name='prediction_two')(dence2)
f3 = Dense(11, activation='softmax', name='prediction_third')(dence2)
model2 = Model(outputs=[f1,f2,f3], inputs=inputs)
model2.summary()
model2.compile(loss=['categorical_crossentropy','categorical_crossentropy','categorical_crossentropy'],optimizer='adam',metrics=['accuracy'])
history = model2.fit(X_train,[co1_train,co2_train,co3_train],64,epochs=10,validation_data=(X_valid,[co1_valid,co2_valid,co3_valid]))

Tensorflow: How to update only single variable at a time out of many variables based on conditions

k1 = tf.Variable(10.0)
k2 = tf.Variable(10.0)
pred = tf.pow(B, ?) / C
cost = tf.pow(pred_s1 - Y, 2)
optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)
sess.run(optimizer, feed_dict={A: a, B: b, C: c})
Update: I want a single prediction function that updates only one variable, based on the value fed into placeholder 'A':
pred = tf.pow(B, k1) / C if A == 0
pred = tf.pow(B, k2) / C if A == 1
s1 = tf.Variable(tf.random_normal([1]))
s2 = tf.Variable(tf.random_normal([1]))
s3 = tf.Variable(tf.random_normal([1]))
s4 = tf.Variable(tf.random_normal([1]))
s5 = tf.Variable(tf.random_normal([1]))
D = tf.placeholder("float")
s2_s = tf.where(tf.logical_and(1.9<D,D<2.1),x=s2,y=s1)
s3_s = tf.where(tf.logical_and(2.9<D,D<3.1),x=s3,y=s2_s)
s4_s = tf.where(tf.logical_and(3.9<D,D<4.1),x=s4,y=s3_s)
s5_s = tf.where(tf.logical_and(4.9<D,D<5.1),x=s5,y=s4_s)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run([s1])[0], sess.run([s2])[0], sess.run([s3])[0], sess.run([s4])[0], sess.run([s5])[0])
print(sess.run(s5_s, feed_dict={D:5}))
sess.close()
Just use
pred = tf.pow(B, A*k2 + (1-A)*k1) / C
which gives you the switch. An alternative would be tf.where.
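For completeness, a minimal sketch of the tf.where variant (hypothetical placeholder names; assumes A is fed as 0.0 or 1.0). The unselected branch receives a zero gradient at that step, so only the corresponding k variable is effectively trained:
A = tf.placeholder(tf.float32)  # selector, fed as 0.0 or 1.0
B = tf.placeholder(tf.float32)
C = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
k1 = tf.Variable(10.0)
k2 = tf.Variable(10.0)
# pick the branch (and hence the trainable exponent) based on A
pred = tf.where(tf.equal(A, 0.0), tf.pow(B, k1) / C, tf.pow(B, k2) / C)
cost = tf.pow(pred - Y, 2)
optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)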

How to avoid dying weights/gradients in a custom LSTM cell in TensorFlow. What would be an ideal loss function?

I am trying to train a name-generation LSTM network. I am not using the pre-defined TensorFlow cells (like tf.contrib.rnn.BasicLSTMCell, etc.); I have created the LSTM cell myself. But the error does not decrease beyond a limit: it only drops about 30% from its initial value (with random weights used in forward propagation) and then starts increasing. Also, the gradients and weights become very small after a few thousand training steps.
I think the reason for the non-convergence is one of two things:
1. the design of the TensorFlow graph I have created, or
2. the loss function I used.
I am feeding one-hot vectors of each character of the word at each time step of the network. The code I used for graph construction and the loss function is below. Tx is the number of time steps in the RNN; n_x, n_a, n_y are the lengths of the input vector, the LSTM cell vector and the output vector respectively.
It would be great if someone could help me identify what I am doing wrong here.
n_x = vocab_size
n_y = vocab_size
n_a = 100
Tx = 50
Ty = Tx
with open("trainingnames_file.txt") as f:
examples = f.readlines()
examples = [x.lower().strip() for x in examples]
X0 = [[char_to_ix[x1] for x1 in list(x)] for x in examples]
X1 = np.array([np.concatenate([np.array(x), np.zeros([Tx-len(x)])]) for x in X0], dtype=np.int32).T
Y0 = [(x[1:] + [char_to_ix["\n"]]) for x in X0]
Y1 = np.array([np.concatenate([np.array(y), np.zeros([Ty-len(y)])]) for y in Y0], dtype=np.int32).T
m = len(X0)
Wf = tf.get_variable(name="Wf", shape = [n_a,(n_a+n_x)])
Wu = tf.get_variable(name="Wu", shape = [n_a,(n_a+n_x)])
Wc = tf.get_variable(name="Wc", shape = [n_a,(n_a+n_x)])
Wo = tf.get_variable(name="Wo", shape = [n_a,(n_a+n_x)])
Wy = tf.get_variable(name="Wy", shape = [n_y,n_a])
bf = tf.get_variable(name="bf", shape = [n_a,1])
bu = tf.get_variable(name="bu", shape = [n_a,1])
bc = tf.get_variable(name="bc", shape = [n_a,1])
bo = tf.get_variable(name="bo", shape = [n_a,1])
by = tf.get_variable(name="by", shape = [n_y,1])
X_input = tf.placeholder(dtype = tf.int32, shape = [Tx,None])
Y_input = tf.placeholder(dtype = tf.int32, shape = [Ty,None])
X = tf.one_hot(X_input, axis = 0, depth = n_x)
Y = tf.one_hot(Y_input, axis = 0, depth = n_y)
X.shape
a_prev = tf.zeros(shape = [n_a,m])
c_prev = tf.zeros(shape = [n_a,m])
a_all = []
c_all = []
for i in range(Tx):
    ac = tf.concat([a_prev, tf.squeeze(tf.slice(input_=X, begin=[0,i,0], size=[n_x,1,m]))], axis=0)
    ct = tf.tanh(tf.matmul(Wc, ac) + bc)
    tug = tf.sigmoid(tf.matmul(Wu, ac) + bu)
    tfg = tf.sigmoid(tf.matmul(Wf, ac) + bf)
    tog = tf.sigmoid(tf.matmul(Wo, ac) + bo)
    c = tf.multiply(tug, ct) + tf.multiply(tfg, c_prev)
    a = tf.multiply(tog, tf.tanh(c))
    y = tf.nn.softmax(tf.matmul(Wy, a) + by, axis=0)
    a_all.append(a)
    c_all.append(c)
    a_prev = a
    c_prev = c
    y_ex = tf.expand_dims(y, axis=1)
    if i == 0:
        y_all = y_ex
    else:
        y_all = tf.concat([y_all, y_ex], axis=1)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y,logits=y_all,dim=0))
opt = tf.train.AdamOptimizer()
train = opt.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    o = sess.run(loss, feed_dict={X_input: X1, Y_input: Y1})
    print(o.shape)
    print(o)
    sess.run(train, feed_dict={X_input: X1, Y_input: Y1})
    o = sess.run(loss, feed_dict={X_input: X1, Y_input: Y1})
    print(o)
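One observation about the snippet above (an assumption about the cause, not a confirmed fix): tf.nn.softmax_cross_entropy_with_logits_v2 expects raw logits, but y is already passed through tf.nn.softmax, so the loss effectively applies softmax twice, which compresses the gradients. A minimal sketch of keeping the pre-softmax scores for the loss, replacing the corresponding lines inside the loop:
# inside the time-step loop: keep the raw scores ...
z = tf.matmul(Wy, a) + by              # pre-softmax scores for this step
y = tf.nn.softmax(z, axis=0)           # only needed for sampling/inspection
z_ex = tf.expand_dims(z, axis=1)
y_all = z_ex if i == 0 else tf.concat([y_all, z_ex], axis=1)
# ... and after the loop, feed those scores (not the softmax output) to the loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=y_all, dim=0))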

How to deal with many columns in Tensorflow

I am studying TensorFlow, and I have a question.
The original code is this:
Columns = ['size' , 'room', 'price']
x1 = tf.Variable(np.array(columns['size']).astype(np.float32))
x2 = tf.Variable(np.array(columns['room']).astype(np.float32))
y = tf.Variable(np.array(columns['price']).astype(np.float32))
train_X1 = np.asarray([i[1] for i in data.loc[:,['size']].to_records()],dtype="float")
train_X2 = np.asarray([i[1] for i in data.loc[:,['room']].to_records()],dtype="float")
train_X = np.asarray([i[1] for i in data.loc[:,'size':'room'].to_records()],dtype="float")
train_Y = np.asarray([i[1] for i in data.loc[:,['price']].to_records()],dtype="float")
n_samples = train_X.shape[0]
X1 = tf.placeholder("float")
X2 = tf.placeholder("float")
Y = tf.placeholder("float")
W1 = tf.Variable(rng.randn(), name="weight1")
W2 = tf.Variable(rng.randn(), name="weight2")
b = tf.Variable(rng.randn(), name="bias")
sum_list = [tf.multiply(X1,W1),tf.multiply(X2,W2)]
pred_X = tf.add_n(sum_list)
pred = tf.add(pred_X,b)
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
If I have many columns, like this:
Columns = ['price', 'lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl', 'driveway', 'recroom',
           'fullbase', 'gashw', 'airco', 'prefarea']
how do I deal with them in TensorFlow?
('price' is the target (dependent variable); the other columns are the features.)
Do I have to make a separate train set and W for each column?
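A minimal sketch of one common approach (not from the original thread): keep all feature columns in one matrix and a single weight vector, so the graph does not need a separate placeholder and W per column. data is the DataFrame used earlier in the question, and the feature columns are assumed to be numeric already:
import numpy as np
import tensorflow as tf
feature_cols = ['lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl',
                'driveway', 'recroom', 'fullbase', 'gashw', 'airco', 'prefarea']
train_X = data[feature_cols].values.astype(np.float32)   # shape (n_samples, n_features)
train_Y = data[['price']].values.astype(np.float32)      # shape (n_samples, 1)
X = tf.placeholder(tf.float32, [None, len(feature_cols)])
Y = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.random_normal([len(feature_cols), 1]), name='weights')
b = tf.Variable(tf.zeros([1]), name='bias')
pred = tf.matmul(X, W) + b                                # one matmul replaces the per-column multiplies
cost = tf.reduce_mean(tf.square(pred - Y)) / 2
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)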

Tensorflow - ValueError: Cannot feed value of shape

I have 19 integer input features. The output labels are 1 or 0. I am following the MNIST example from the TensorFlow website.
My code is here:
validation_images, validation_labels, train_images, train_labels = ld.read_data_set()
print "\n"
print len(train_images[0])
print len(train_labels)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 19])
y_ = tf.placeholder(tf.float32, shape=[None, 2])
W = tf.Variable(tf.zeros([19,2]))
b = tf.Variable(tf.zeros([2]))
sess.run(tf.initialize_all_variables())
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
start = 0
batch_1 = 50
end = 100
for i in range(1000):
    #batch = mnist.train.next_batch(50)
    x1 = train_images[start:end]
    y1 = train_labels[start:end]
    start = start + batch_1
    end = end + batch_1
    x1 = np.reshape(x1, (-1, 19))
    y1 = np.reshape(y1, (-1, 2))
    train_step.run(feed_dict={x: x1[0], y_: y1[0]})
When I run the code above, I get an error:
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (19,) for Tensor u'Placeholder:0', which has shape '(?, 19)'
How can I handle this error?
Try
train_step.run(feed_dict={x: x1, y_: y1})
Alternatively, you can reshape the value you feed with the following code:
x1 = np.column_stack((x1))
x1 = np.transpose(x1) # if necessary
Thus, the shape of the input value will be (1, 19) instead of (19,)
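As a quick illustration (not from the original answer), a plain reshape also yields the 2-D shape the placeholder expects:
import numpy as np
sample = np.arange(19, dtype=np.float32)   # shape (19,), like x1[0] in the question
batch = sample.reshape(1, 19)              # shape (1, 19), matches the (?, 19) placeholder
print(sample.shape, batch.shape)           # (19,) (1, 19)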