Using attention in a CNN attention model - tensorflow

I am very new to python and ML.
I have a working CNN LSTM model as below:
def model_demo():
    """Build the CNN-LSTM sequence labeller: per-timestep sigmoid over a
    6000-step, 3-channel input.

    Returns:
        A Keras ``Model`` mapping (batch, 6000, 3) -> (batch, 6000, 1).

    Note: relies on module-level ``dropout_rate_cnn`` and the usual Keras
    layer imports (Input, Conv1D, LSTM, ...) being in scope.
    """
    inp = Input(shape=(6000, 3), name='input')
    e = Conv1D(16, 9, strides=1, padding='same', activation='relu')(inp)
    e = BatchNormalization()(e)
    # NOTE(review): Conv1D above already applies relu; this second relu after
    # BatchNorm is kept from the original but is likely redundant.
    e = Activation('relu')(e)
    e = Dropout(dropout_rate_cnn)(e)
    e = LSTM(32, return_sequences=True, unroll=True)(e)
    # BUG FIX: the original passed an undefined name ``UNIlstm`` into the next
    # two layers; chain from ``e`` (the LSTM output) instead.
    e = Dropout(0.7)(e)
    e = BatchNormalization()(e)
    e = TimeDistributed(Dense(64, kernel_regularizer=l1(0.01), activation='relu'))(e)
    e = BatchNormalization()(e)
    e = Dropout(0.7)(e)
    e = TimeDistributed(Dense(1, kernel_regularizer=l1(0.01), activation='sigmoid'))(e)
    out_model = Model(inputs=inp, outputs=e)  # output shape: (batch, 6000, 1)
    # BUG FIX: the original built the model but never returned it.
    return out_model
I want to add attention to it using the Attention layer provided by TensorFlow. I am unsure about the inputs of this attention layer, and whether I should build a CNN-LSTM-Attention model or replace the LSTM with Attention entirely.
I tried to add Attention after the LSTM layer with the input `[e[0], e[1]]`, but this contributes 0 parameters to the model and produces the model summary below (I am not sure if this is the right way):
tf.__operators__.getitem (Slic (6000, 32) 0 ['lstm[0][0]']
ingOpLambda)
tf.__operators__.getitem_1 (Sl (6000, 32) 0 ['lstm[0][0]']
icingOpLambda)
attention (Attention) (6000, 32) 0 ['tf.__operators__.getitem[0][0]'
, 'tf.__operators__.getitem_1[0][
0]']
dropout_1 (Dropout) (6000, 32) 0 ['attention[0][0]']

Related

Range of testing accuracy is large in CNN

I have trained my model to above 0.8 accuracy on the training set.
But the testing accuracy is sometimes 0.7 and sometimes as low as 0.2.
(My training dataset and testing dataset are the same.)
What's wrong with my CNN model?
Thank you.
This is my training model.
# Shared CNN trunk feeding three independent 11-way softmax heads.
inputs = tf.keras.layers.Input(shape=(64, 64, 3))

# Block 1: two 3x3 convs (32 filters) with LeakyReLU, then 2x2 max-pool.
h = Conv2D(32, (3, 3), padding='same')(inputs)
h = LeakyReLU(alpha=0.3)(h)
h = Conv2D(32, (3, 3), padding='same')(h)
h = LeakyReLU(alpha=0.3)(h)
h = MaxPooling2D(pool_size=(2, 2))(h)

# Block 2: two 3x3 convs (64 filters) with LeakyReLU, then 2x2 max-pool.
h = Conv2D(64, (3, 3), padding='same')(h)
h = LeakyReLU(alpha=0.3)(h)
h = Conv2D(64, (3, 3), padding='same')(h)
h = LeakyReLU(alpha=0.3)(h)
h = MaxPooling2D(pool_size=(2, 2))(h)

# Blocks 3-4: single relu convs (128 then 256 filters), each max-pooled.
h = Conv2D(128, (3, 3), padding='same', activation='relu')(h)
h = MaxPooling2D(pool_size=(2, 2))(h)
h = Conv2D(256, (3, 3), padding='same', activation='relu')(h)
h = MaxPooling2D(pool_size=(2, 2))(h)

# Classifier: flatten, one shared dense layer, three softmax outputs.
h = Flatten()(h)
shared = Dense(64, activation='relu')(h)
f1 = Dense(11, activation='softmax', name='prediction_one')(shared)
f2 = Dense(11, activation='softmax', name='prediction_two')(shared)
f3 = Dense(11, activation='softmax', name='prediction_third')(shared)

model2 = Model(outputs=[f1, f2, f3], inputs=inputs)
model2.summary()
model2.compile(
    loss=['categorical_crossentropy', 'categorical_crossentropy', 'categorical_crossentropy'],
    optimizer='adam',
    metrics=['accuracy'],
)
# Third positional argument of fit() is batch_size (64), as in the original.
history = model2.fit(
    X_train,
    [co1_train, co2_train, co3_train],
    64,
    epochs=10,
    validation_data=(X_valid, [co1_valid, co2_valid, co3_valid]),
)

TensorFlow get stuck after use concatenate layer

I have the next model:
import tensorflow as tf

# Two inputs: a (10, 300, 1) "image-like" tensor and a 24-feature vector
# that is concatenated into the dense head after the conv trunk.
input1 = tf.keras.layers.Input(shape=(10, 300, 1))
input2 = tf.keras.layers.Input(shape=(24,))

# Conv trunk over input1: conv -> pool -> dropout, twice.
h = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')(input1)
h = tf.keras.layers.MaxPooling2D(2, 2)(h)
h = tf.keras.layers.Dropout(0.25)(h)
h = tf.keras.layers.Conv2D(128, (2, 2), activation='relu')(h)
h = tf.keras.layers.MaxPooling2D(2, 2)(h)
h = tf.keras.layers.Dropout(0.25)(h)

# Dense head: flatten conv features, merge in the 24 extra features.
h = tf.keras.layers.Flatten()(h)
h = tf.keras.layers.Dense(512, activation='relu')(h)
h = tf.keras.layers.Dropout(0.25)(h)
h = tf.keras.layers.Concatenate()([h, input2])
h = tf.keras.layers.Dense(128, activation='relu')(h)
h = tf.keras.layers.Dropout(0.25)(h)
output = tf.keras.layers.Dense(1, activation='sigmoid')(h)

model = tf.keras.models.Model(inputs=[input1, input2], outputs=output)
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit([X_train, X_features], y_train, batch_size=64, epochs=100)
But when I try to fit it, it gets stuck: only "Epoch 1/100" appears and nothing more happens, even if I let it run for hours. When I remove the concatenate layer, everything goes well. I'm using Google Colab. Why is this happening?

Add extra kernel to a CNN layer while maintaining the weights learned for the other kernels

I'm training a simple feed forward conv neural network on the cifar10 dataset. After running a few epochs, I want to increase the kernel count in the 2nd conv layer from 16 to some number k.
How do I do this while keeping the trained weights for the other kernels and layers in the model intact?
def conv_layer(inp, fltrs):
    """One conv block: 3x3 valid Conv2D -> BatchNorm -> Dropout(0.25) -> ReLU.

    Args:
        inp: input tensor.
        fltrs: number of convolution filters.

    Returns:
        The activated output tensor.
    """
    out = Conv2D(filters=fltrs, kernel_size=3, strides=1, padding='valid')(inp)
    out = BatchNormalization()(out)
    out = Dropout(0.25)(out)
    return Activation('relu')(out)
# CIFAR-10-sized classifier: five conv_layer blocks with two max-pools,
# then a dense head with softmax over num_classes.
inp = Input(shape=(32, 32, 3))
net = conv_layer(inp, 8)
net = conv_layer(net, 16)
net = MaxPooling2D(pool_size=2, strides=None, padding='valid', data_format=None)(net)
net = conv_layer(net, 32)
net = conv_layer(net, 48)
net = conv_layer(net, 64)
net = MaxPooling2D(pool_size=2, strides=None, padding='valid', data_format=None)(net)
net = Flatten()(net)
net = Dense(512)(net)
net = BatchNormalization()(net)
net = Dropout(0.25)(net)
net = Activation('relu')(net)
logits = Dense(num_classes, activation='softmax')(net)
model = Model(inputs=[inp], outputs=[logits])

How to avoid dying weights/gradients in custom LSTM cell in tensorflow. What shall be ideal loss function?

I am trying to train a name-generation LSTM network. I am not using the pre-defined TensorFlow cells (like tf.contrib.rnn.BasicLSTMCell, etc.); I have created the LSTM cell myself. But the error does not decrease beyond a limit: it only drops about 30% from its initial value (with random weights in forward propagation) and then starts increasing. Also, the gradients and weights become very small after a few thousand training steps.
I think the reason for non-convergence can be one of two:
1. The design of tensorflow graph i have created OR
2. The loss function i used.
I am feeding one-hot vectors of each character of the word for each time step of the network. The code I used for graph construction and the loss function is as follows. Tx is the number of time steps in the RNN; n_x, n_a, and n_y are the lengths of the input vector, LSTM cell vector, and output vector respectively.
It would be great if someone could help me identify what I am doing wrong here.
# Hand-rolled character-level LSTM (TF1.x graph mode).
# Sizes: n_x/n_y = one-hot vocab size, n_a = cell state size, Tx = max steps.
n_x = vocab_size
n_y = vocab_size
n_a = 100
Tx = 50
Ty = Tx

# Load training names; targets are the inputs shifted left by one char,
# terminated by "\n". Both are zero-padded to Tx/Ty and transposed so the
# time axis comes first: shape (T, m).
with open("trainingnames_file.txt") as f:
    examples = f.readlines()
examples = [x.lower().strip() for x in examples]
X0 = [[char_to_ix[x1] for x1 in list(x)] for x in examples]
X1 = np.array([np.concatenate([np.array(x), np.zeros([Tx - len(x)])])
               for x in X0], dtype=np.int32).T
Y0 = [(x[1:] + [char_to_ix["\n"]]) for x in X0]
Y1 = np.array([np.concatenate([np.array(y), np.zeros([Ty - len(y)])])
               for y in Y0], dtype=np.int32).T
m = len(X0)

# Gate parameters: each gate sees [a_prev; x_t], hence shape (n_a, n_a+n_x).
Wf = tf.get_variable(name="Wf", shape=[n_a, (n_a + n_x)])  # forget gate
Wu = tf.get_variable(name="Wu", shape=[n_a, (n_a + n_x)])  # update gate
Wc = tf.get_variable(name="Wc", shape=[n_a, (n_a + n_x)])  # candidate cell
Wo = tf.get_variable(name="Wo", shape=[n_a, (n_a + n_x)])  # output gate
Wy = tf.get_variable(name="Wy", shape=[n_y, n_a])          # output projection
bf = tf.get_variable(name="bf", shape=[n_a, 1])
bu = tf.get_variable(name="bu", shape=[n_a, 1])
bc = tf.get_variable(name="bc", shape=[n_a, 1])
bo = tf.get_variable(name="bo", shape=[n_a, 1])
by = tf.get_variable(name="by", shape=[n_y, 1])

X_input = tf.placeholder(dtype=tf.int32, shape=[Tx, None])
Y_input = tf.placeholder(dtype=tf.int32, shape=[Ty, None])
# One-hot along axis 0 -> X has shape (n_x, Tx, m), Y has shape (n_y, Ty, m).
X = tf.one_hot(X_input, axis=0, depth=n_x)
Y = tf.one_hot(Y_input, axis=0, depth=n_y)

a_prev = tf.zeros(shape=[n_a, m])
c_prev = tf.zeros(shape=[n_a, m])
a_all = []
c_all = []
for i in range(Tx):
    # Stack hidden state on top of this timestep's one-hot input: (n_a+n_x, m).
    ac = tf.concat([a_prev,
                    tf.squeeze(tf.slice(input_=X, begin=[0, i, 0],
                                        size=[n_x, 1, m]))], axis=0)
    ct = tf.tanh(tf.matmul(Wc, ac) + bc)
    tug = tf.sigmoid(tf.matmul(Wu, ac) + bu)
    tfg = tf.sigmoid(tf.matmul(Wf, ac) + bf)
    tog = tf.sigmoid(tf.matmul(Wo, ac) + bo)
    c = tf.multiply(tug, ct) + tf.multiply(tfg, c_prev)
    a = tf.multiply(tog, tf.tanh(c))
    # BUG FIX: keep the RAW logits for the loss. The original applied
    # tf.nn.softmax here and then fed the result to
    # softmax_cross_entropy_with_logits_v2, which applies softmax AGAIN.
    # That double softmax flattens the distribution, caps how far the loss
    # can fall, and shrinks gradients -- matching the observed symptoms.
    z = tf.matmul(Wy, a) + by
    y = tf.nn.softmax(z, axis=0)  # probabilities, for sampling/inspection only
    a_all.append(a)
    c_all.append(c)
    a_prev = a
    c_prev = c
    z_ex = tf.expand_dims(z, axis=1)
    if i == 0:
        z_all = z_ex
    else:
        z_all = tf.concat([z_all, z_ex], axis=1)

# Cross-entropy over the class axis (dim=0), averaged over time and batch.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=z_all, dim=0))
opt = tf.train.AdamOptimizer()
train = opt.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    o = sess.run(loss, feed_dict={X_input: X1, Y_input: Y1})
    print(o.shape)
    print(o)
    sess.run(train, feed_dict={X_input: X1, Y_input: Y1})
    o = sess.run(loss, feed_dict={X_input: X1, Y_input: Y1})
    print(o)

Tensorflow - ValueError: Cannot feed value of shape

I have 19 input integer features. Output and labels is 1 or 0. I examine MNIST example from tensorflow website.
My code is here :
validation_images, validation_labels, train_images, train_labels = ld.read_data_set()
print "\n"
print len(train_images[0])
print len(train_labels)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 19])
y_ = tf.placeholder(tf.float32, shape=[None, 2])
W = tf.Variable(tf.zeros([19,2]))
b = tf.Variable(tf.zeros([2]))
sess.run(tf.initialize_all_variables())
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
start = 0
batch_1 = 50
end = 100
for i in range(1000):
#batch = mnist.train.next_batch(50)
x1 = train_images[start:end]
y1 = train_labels[start:end]
start = start + batch_1
end = end + batch_1
x1 = np.reshape(x1, (-1, 19))
y1 = np.reshape(y1, (-1, 2))
train_step.run(feed_dict={x: x1[0], y_: y1[0]})
When I run the code above, I get an error:
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (19,) for Tensor u'Placeholder:0', which has shape '(?, 19)'
How can I handle this error?
Try
train_step.run(feed_dict={x: x1, y_: y1})
You can reshape your feed's value by the following code:
x1 = np.column_stack((x1))
x1 = np.transpose(x1) # if necessary
Thus, the shape of the input value will be (1, 19) instead of (19,)