ValueError during TensorFlow graph construction

I'm trying to train a GAN using TensorFlow, but during graph construction I'm getting this error:
ValueError: Input 0 of layer conv1_1 is incompatible with the layer: its rank is undefined, but the layer requires a defined rank.
I'm guessing this is because my discriminator and generator live in separate functions that I call. Ideally I'd like to avoid pulling my entire architecture out of those methods, as that would lead to a tediously long Python file.
I've tried giving the placeholders a default value before the first training examples are pushed through a sess.run() call, but that led to the same example being run through the graph at every stage of training (probably because that's how TensorFlow constructed the graph).
My training loop code is below. Please let me know if seeing the generator and discriminator functions themselves would help.
img1 = tf.placeholder(dtype = tf.float32)
img2 = tf.placeholder(dtype = tf.float32)
lr = tf.placeholder(dtype = tf.float32)
synthetic_imgs, synthetic_logits, mse, _ = self.gen(img1)
fake_result, fake_logits_hr, fake_feature_2 = self.disc(img1, synthetic_imgs)
ground_truth_result, ground_truth_logits_hr, truth_feature_2 = self.disc(img1, img2)
_, fake_logits_lr = self.disc_two(img1, synthetic_imgs)
_, ground_truth_logits_lr = self.disc_two(img1, img2)
a = tf.nn.sigmoid_cross_entropy_with_logits
dis_labels = tf.random.uniform((self.batch_size, 1), minval = -0.2, maxval = 0.3)
gen_labels = tf.random.uniform((self.batch_size, 1), minval = 0.75, maxval = 1.2)
dis_loss = #Discriminator Loss
gen_loss = #Generator Loss
#May want to change to -log(MSE)
d_vars = [var for var in tf.trainable_variables() if 'disc' in var.name]
g_vars = [var for var in tf.trainable_variables() if 'g_' in var.name]
dis_loss = tf.reduce_mean(dis_loss)
gen_loss = tf.reduce_mean(gen_loss)
with tf.variable_scope('optimizers', reuse = tf.AUTO_REUSE) as scope:
    gen_opt = tf.train.AdamOptimizer(learning_rate = lr, name = 'gen_opt')
    disc_opt = tf.train.AdamOptimizer(learning_rate = lr, name = 'dis_opt')
    gen1 = gen_opt.minimize(gen_loss, var_list = g_vars)
    disc1 = disc_opt.minimize(dis_loss, var_list = d_vars)
#Tensorboard code for visualizing gradients
#global_step = variable_scope.get_variable("global_step", [], trainable=False, dtype=dtypes.int64, initializer=init_ops.constant_initializer(0, dtype=dtypes.int64))
# gen_training = tf.contrib.layers.optimize_loss(gen_loss, global_step, learning_rate=lr, optimizer=gen_opt, summaries=["gradients"], variables = g_vars)
# disc_training = tf.contrib.layers.optimize_loss(dis_loss, global_step, learning_rate=lr, optimizer=disc_opt, summaries=["gradients"], variables = d_vars)
#summary = tf.summary.merge_all()
with tf.Session() as sess:
    print('start session')
    sess.run(tf.global_variables_initializer())
    #print(tf.trainable_variables()) #Find variable corresponding to conv filter weights, which you can use for tensorboard visualization
    #Code to load each training example
    gen = self.pairs()
    for i in range(self.num_epochs):
        print(str(i + 1) + 'th epoch')
        for j in range(self.num_batches):
            i_1 = None
            i_2 = None
            #Creates batch
            for k in range(self.batch_size):
                p = next(gen)
                try:
                    i_1 = np.concatenate((i_1, self.load_img(p[0])), axis = 0)
                    i_2 = np.concatenate((i_2, self.load_img(p[1])), axis = 0)
                except Exception:
                    i_1 = self.load_img(p[0])
                    i_2 = self.load_img(p[1])
            l_r = 8e-4 * (0.5)**(i//100) #Play around with this value
            test, gLoss, _ = sess.run([img1, gen_loss, gen1], feed_dict = {img1 : i_1, img2 : i_2, lr : l_r})
            dLoss, _ = sess.run([dis_loss, disc1], feed_dict = {img1 : i_1, img2 : i_2, lr : l_r})
            print(test.shape)
            cv2.imwrite('./saved_imgs/gan_test' + str(j) + '.png', np.squeeze(test, axis = 3)[0])
            #Code to display gradients and other relevant stats on tensorboard
            #Will be under histogram tab, labelled OptimizeLoss
            # if j % 500 == 0:
            #     writer = tf.summary.FileWriter(sess.graph, logdir = './tensorboard/1') #Change logdir for each run
            #     summary_str = sess.run(summary, feed_dict = {img1 : i_1, img2 : i_2, lr : l_r})
            #     writer.add_summary(summary_str, str(i) + ': ' + str(j)) #Can change to one epoch only if necessary
            #     writer.flush()
            if j % 12 == 0: #Prints loss statistics every 12th batch
                #print('Epoch: ' + str(i))
                print('Generator Loss: ' + str(gLoss))
                print('Discriminator Loss: ' + str(dLoss))
        self.save_model(sess, i)
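For context on the error itself (this is an illustration of mine, not the poster's code): a tf.placeholder created without a shape argument has an undefined rank, and Keras-style convolution layers refuse to build on an input whose rank is unknown. A minimal sketch of triggering and avoiding that check:
import tensorflow as tf

# Undefined rank: no shape is given, so conv layers cannot determine how to build.
bad = tf.placeholder(tf.float32)
# Defined rank: batch and spatial sizes may stay None, but the rank (and channels) is fixed.
good = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='img1')

conv = tf.layers.conv2d(good, filters=32, kernel_size=3, name='conv1_1')   # builds fine
# tf.layers.conv2d(bad, filters=32, kernel_size=3)  # would raise the ValueError quoted above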

Related

The val_loss is nan while the training loss prints normally; both train and validation losses are nan in model.evaluate(), yet accuracy improves during training

This is a 2-class classification problem, and my loss function is custom. The labels are categorical, and the final activation function is Softmax. During training the loss is printed, but the val_loss is nan (inf). Using model.evaluate(X_train, Y_train) at the end of training, the train loss is the same as the validation loss, and both are nan.
This is my custom loss function.
def custom_loss(y_true, y_pred):
    import tensorflow as tf
    bce = tf.keras.losses.BinaryCrossentropy(
        from_logits=False,
        label_smoothing=0.0,
        axis=-1,
        reduction="none",
        name="binary_crossentropy",
    )
    intra = tf.constant(1, dtype=tf.float64)
    inter = tf.constant(0.01, dtype=tf.float64)
    zeros = tf.gather_nd(y_pred, tf.where(tf.argmin(y_true, axis=1)))
    ones = tf.gather_nd(y_pred, tf.where(tf.argmax(y_true, axis=1)))
    centroid_zero = tf.reduce_mean(zeros, 0)
    centroid_one = tf.reduce_mean(ones, 0)
    loss_zero_intra = tf.math.squared_difference(zeros, centroid_zero)
    loss_one_intra = tf.math.squared_difference(ones, centroid_zero)
    loss_zero_intra = tf.cast(loss_zero_intra, tf.float64)
    loss_one_intra = tf.cast(loss_one_intra, tf.float64)
    loss_intra = tf.zeros_like(y_pred, tf.float64)
    loss_intra = tf.tensor_scatter_nd_update(loss_intra, tf.where(tf.argmin(y_true, axis=1)), loss_zero_intra)
    loss_intra = tf.tensor_scatter_nd_update(loss_intra, tf.where(tf.argmax(y_true, axis=1)), loss_one_intra)
    loss_inter_value = tf.math.sqrt(tf.math.squared_difference(centroid_zero[0], centroid_one[0]) +
                                    tf.math.squared_difference(centroid_zero[1], centroid_one[1]))
    loss_inter = tf.fill(tf.shape(y_pred), loss_inter_value)
    binary_cross_entropy = tf.tile(tf.expand_dims(bce(y_true, y_pred), axis=1),
                                   tf.constant([1, 2], tf.int32))
    loss_intra = tf.cast(loss_intra, tf.float64)
    loss_inter = tf.cast(loss_inter, tf.float64)
    binary_cross_entropy = tf.cast(binary_cross_entropy, tf.float64)
    loss = tf.math.multiply(intra, loss_intra) - tf.math.multiply(inter, loss_inter) + binary_cross_entropy
    return loss
You can also see my model code here:
def create_model(kernelLength = 32, nb_classes = 2, Chans = 19, Samples = 512,
                 dropoutRate = 0.5, F1 = 8, D = 2, F2 = 16, norm_rate = 0.25,
                 dropoutType = 'Dropout', optimizer_type = 'Adam', lr = 0.0005, **kwargs):
    K.clear_session()
    gc.collect()
    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')
    input1 = Input(shape = (1, Chans, Samples))
    block1 = Conv2D(F1, (1, kernelLength), padding = 'same',
                    input_shape = (1, Chans, Samples),
                    use_bias = False)(input1)
    block1 = BatchNormalization(axis = 1)(block1)
    block1 = DepthwiseConv2D((Chans, 1), use_bias = False,
                             depth_multiplier = D,
                             depthwise_constraint = max_norm(1.))(block1)
    block1 = BatchNormalization(axis = 1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 4))(block1)
    block1 = dropoutType(dropoutRate)(block1)
    block2 = SeparableConv2D(F2, (1, 16),
                             use_bias = False, padding = 'same')(block1)
    block2 = BatchNormalization(axis = 1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)
    flatten = Flatten(name = 'flatten')(block2)
    dense = Dense(nb_classes, name = 'dense',
                  kernel_constraint = max_norm(norm_rate))(flatten)
    softmax = Activation('softmax', name = 'softmax')(dense)
    model = Model(inputs=input1, outputs=softmax)
    if optimizer_type == 'Adam':
        optimizer = Adam(learning_rate = lr)
    if optimizer_type == 'Adamax':
        optimizer = Adamax(learning_rate = lr)
    if optimizer_type == 'AdamW':
        optimizer = AdamW(learning_rate = lr)
    model.compile(loss=custom_loss, optimizer=optimizer, metrics = ['accuracy'])
    return model
The custom_loss function returns the sum of three distinct terms. One of them is the binary_cross_entropy; the model works fine with that term alone, behaving the same as Keras's built-in binary cross-entropy, so there is no problem with the data. The train and validation accuracy improve throughout training, and the train loss decreases. The number of validation samples is the same as the number of train samples.
After training finished, model.evaluate(X, Y) reported the loss as nan; however, calculating the loss manually with the custom loss function gives a number, not nan.
Increasing the batch size, scaling the data, and using clipnorm or clipvalue within the optimizer all had no effect. Furthermore, no nan appears in the model predictions (y_pred). I suspect that the problem is caused by extreme values inside the model predictions.
Can anyone suggest a solution to this problem?
Thanks in advance.
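One way to narrow this down (a debugging sketch of my own, not part of the original code, assuming TF 2.x / tf.keras): wrap each term of the custom loss in tf.debugging.check_numerics so that evaluate() raises a named error as soon as any individual term produces nan or inf, which tells you whether the intra, inter, or cross-entropy term is responsible.
import tensorflow as tf

def checked(tensor, name):
    # Raises InvalidArgumentError mentioning `name` if tensor contains nan or inf.
    return tf.debugging.check_numerics(tensor, message=name)

def custom_loss_debug(y_true, y_pred):
    # Simplified stand-ins for the real intra/inter/BCE terms, for illustration only.
    bce = tf.cast(tf.keras.losses.binary_crossentropy(y_true, y_pred), tf.float64)
    centroid_zero = tf.reduce_mean(tf.gather_nd(y_pred, tf.where(tf.argmin(y_true, axis=1))), 0)
    centroid_one = tf.reduce_mean(tf.gather_nd(y_pred, tf.where(tf.argmax(y_true, axis=1))), 0)
    # Note: reduce_mean over an empty selection (a batch containing only one class) yields nan.
    loss_inter = tf.cast(tf.math.sqrt(tf.reduce_sum(
        tf.math.squared_difference(centroid_zero, centroid_one))), tf.float64)
    return checked(bce, "bce") - 0.01 * checked(loss_inter, "inter")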

How to test a trained model using TensorFlow for image classification

I created a simple image classification network using TensorFlow and trained it successfully. But when testing the model with the same input image, I get different prediction results each time. Details are given below.
Only two classes (namely dog and cat) are present in the dataset. I converted the dataset to tfrecord before using it. The network architecture is shown below.
def conv_layer(input, channels_in, filter_size, channels_out, name="conv"):
    with tf.name_scope(name):
        w = tf.Variable(tf.truncated_normal([filter_size, filter_size, channels_in, channels_out], stddev=0.1), name=name + "/W")
        b = tf.Variable(tf.constant(0.1, shape=[channels_out]), name=name + "/B")
        conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding="SAME")
        ret = tf.nn.max_pool((conv + b), ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
        return ret

def fc_layer(input, channels_in, channels_out, name="fc"):
    with tf.name_scope(name):
        w = tf.Variable(tf.truncated_normal([channels_in, channels_out], stddev=0.1), name=name + "/W")
        b = tf.Variable(tf.constant(0.1, shape=[channels_out]), name=name + "/B")
        mul = tf.matmul(input, w)
        ret = tf.add(mul, b, name="logits")
        return ret

def inference(image_batch):
    fc1_size = 128
    num_classes = 2
    conv1 = conv_layer(image_batch, 3, 3, 32, "conv1")
    conv2 = conv_layer(conv1, 32, 3, 32, "conv2")
    conv3 = conv_layer(conv2, 32, 3, 64, "conv3")
    shape = conv3.get_shape()
    #shape = tf.shape(conv3)
    flat = tf.reshape(conv3, shape=[-1, shape[1:4].num_elements()])
    fc1 = fc_layer(flat, shape[1:4].num_elements(), fc1_size, "fc1")
    logits = fc_layer(fc1, fc1_size, num_classes, "fc2")
    return logits
Training of the model is done by the following code:
label, image = ReadData.read_and_decode_single_example(["trian.tfrecords"],2,128*128*3)
label = tf.argmax(label)
image = tf.cast(image, tf.float32)
# groups examples into batches randomly
image = tf.reshape(image,shape = [128,128,3])
image = tf.multiply(image,1.0/255,name = "in_image")#np.multiply(image, 1.0 / 255.0)
image_batch_ph = tf.placeholder(tf.float32,shape = [None,128,128,3],name = "image_batch_ph")
label_batch_ph = tf.placeholder(tf.int64,shape = [None],name = "label_batch_ph")
images_batch, labels_batch = tf.train.shuffle_batch(
    [image, label], batch_size=32,
    capacity=2000,
    min_after_dequeue=1000, name="shuffle_step")
predict = inference(image_batch_ph)
y_pred = tf.nn.softmax(predict,name='y_pred')
loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels = label_batch_ph,logits = predict)
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)
loss_mean = tf.reduce_mean(loss)
correct_prediction = tf.equal(tf.argmax(predict,1),label_batch_ph)
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
sess = tf.Session()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init)
tf.train.start_queue_runners(sess=sess)
i = 0
while i < 10000:
    #labels = sess.run(labels_batch)
    #print labels
    imgs, lbs = sess.run([images_batch, labels_batch])
    _, lossMean = sess.run([train_op, loss_mean], feed_dict={image_batch_ph: imgs, label_batch_ph: lbs})
    if i % 1000 == 0:
        print "iteration ", i, "Loss :", lossMean
    if i % 2000 == 0:
        acc = sess.run([accuracy], feed_dict={image_batch_ph: imgs, label_batch_ph: lbs})
        print "iteration ", i, "accuracy :", acc
    i += 1
save_path = saver.save(sess, "./model/model.ckpt")
print( "model is saved at %s",save_path)
"ReadData.read_and_decode_single_example()" function return a single image tensor and corresponding label tensor. The current session (trained model) is saved in the folder ./model
For testing, I wrote another script shown below. My intention is to load the session saved by above script and use the model to classify an image.
import cv2
image_size = 128
image = cv2.imread("./dog.11.jpg")
image = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)
image = tf.cast(image, tf.float32)
image = tf.reshape(image,shape = [128,128,3])
image = tf.multiply(image,1.0/255,name = "in_image")#np.multiply(image, 1.0 / 255.0)
image = tf.expand_dims(image, 0)
sess = tf.Session()
new_saver = tf.train.import_meta_graph('./model/model.ckpt.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./model/'))
init = tf.global_variables_initializer()
sess.run(init)
image_batch_ph = tf.get_default_graph().get_tensor_by_name("image_batch_ph:0")
test_image = sess.run(image)
y_pred = tf.get_default_graph().get_tensor_by_name("y_pred:0")
predicted_labels = sess.run(y_pred,feed_dict={image_batch_ph:test_image})
print predicted_labels
When testing the model with the same image multiple times, the prediction results are different each time. I am not able to figure out what went wrong.
The mistake was mine. When using a pre-trained model from a checkpoint, you don't need:
init = tf.global_variables_initializer()
sess.run(init)
This will initialize the weights with new random values.
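For completeness, a sketch of the test script with that fix applied (my own variant, using the same tensor names as above and doing the preprocessing in NumPy so the restored graph stays untouched):
import cv2
import numpy as np
import tensorflow as tf

image_size = 128
img = cv2.imread("./dog.11.jpg")
img = cv2.resize(img, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
img = np.expand_dims(img.astype(np.float32) / 255.0, axis=0)  # shape (1, 128, 128, 3)

with tf.Session() as sess:
    saver = tf.train.import_meta_graph('./model/model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./model/'))
    # No tf.global_variables_initializer() here -- it would overwrite the restored weights.
    graph = tf.get_default_graph()
    image_batch_ph = graph.get_tensor_by_name("image_batch_ph:0")
    y_pred = graph.get_tensor_by_name("y_pred:0")
    print(sess.run(y_pred, feed_dict={image_batch_ph: img}))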

TensorFlow: loss jumps up after restoring RNN net

Environment info
Operating System: Windows 7 64-bit
TensorFlow installed from pre-built pip (no CUDA): 1.0.1
Python 3.5.2 64-bit
Problem
I have problems with restoring my net (an RNN character-based language model). Below is a simplified version with the same problem.
When I run it the first time, I get, for example, this.
...
step 160: loss = 1.956 (perplexity = 7.069016620211226)
step 180: loss = 1.837 (perplexity = 6.274748642468816)
step 200: loss = 1.825 (perplexity = 6.202084762557817)
But on the second run, after restoring parameters, I get this.
step 220: loss = 2.346 (perplexity = 10.446611983898903)
step 240: loss = 2.346 (perplexity = 10.446709120339545)
...
All the tf variables seem to be correctly restored, including the state, which is fed to the RNN.
The data position is also restored (from 'step').
I also made a similar program for an MNIST recognition model, and that one works fine: the losses before and after restoring are continuous.
Are there any other parameters or states that should be saved and restored?
import argparse
import os
import tensorflow as tf
import numpy as np
import math
B = 20 # batch size
H = 200 # size of hidden layer of neurons
T = 25 # number of time steps to unroll the RNN for
data_file = 'ptb.train.txt' # any plain text file will do
checkpoint_dir = "tmp"
#----------------
# prepare data
#----------------
data = open(data_file, 'r').read()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('data has {0} characters, {1} unique.'.format(data_size, vocab_size))
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
input_index_raw = np.array([char_to_ix[ch] for ch in data])
input_index_raw = input_index_raw[0:len(input_index_raw) // T * T]
input_index_raw_shift = np.append(input_index_raw[1:], input_index_raw[0])
input_all = input_index_raw.reshape([-1, T])
target_all = input_index_raw_shift.reshape([-1, T])
num_packed_data = len(input_all)
#----------------
# build model
#----------------
class Model(object):
    def __init__(self):
        self.input_ph = tf.placeholder(tf.int32, [None, T], name="input_ph")
        self.target_ph = tf.placeholder(tf.int32, [None, T], name="target_ph")
        embedding = tf.get_variable("embedding", [vocab_size, H], initializer=tf.random_normal_initializer(), dtype=tf.float32)
        # input_ph is B x T.
        # input_embedded is B x T x H.
        input_embedded = tf.nn.embedding_lookup(embedding, self.input_ph)
        cell = tf.contrib.rnn.BasicRNNCell(H)
        self.state_ph = tf.placeholder(tf.float32, (None, cell.state_size), name="state_ph")
        # Make state a variable so that it will be saved by the saver.
        self.state = tf.get_variable("state", (B, cell.state_size), initializer=tf.zeros_initializer(), trainable=False, dtype=tf.float32)
        # Construct initial_state according to whether restoring or not.
        self.isRestore = tf.placeholder(tf.bool, shape=(), name="isRestore")
        zero_state = cell.zero_state(B, dtype=tf.float32)
        self.initial_state = tf.cond(self.isRestore, lambda: self.state, lambda: zero_state)
        # input_embedded : B x T x H
        # output: B x T x H
        # state : B x cell.state_size
        output, state_ = tf.nn.dynamic_rnn(cell, input_embedded, initial_state=self.state_ph)
        self.final_state = tf.assign(self.state, state_)
        # reshape to (B * T) x H.
        output_flat = tf.reshape(output, [-1, H])
        # Convert hidden layer's output to vector of logits for each vocabulary.
        softmax_w = tf.get_variable("softmax_w", [H, vocab_size], dtype=tf.float32)
        softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=tf.float32)
        logits = tf.matmul(output_flat, softmax_w) + softmax_b
        # cross_entropy is a vector of length B * T
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(self.target_ph, [-1]), logits=logits)
        self.loss = tf.reduce_mean(cross_entropy)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        self.global_step = tf.get_variable("global_step", (), initializer=tf.zeros_initializer(), trainable=False, dtype=tf.int32)
        self.training_op = optimizer.minimize(cross_entropy, global_step=self.global_step)

    def train_batch(self, sess, input_batch, target_batch, initial_state):
        final_state_, _, final_loss = sess.run([self.final_state, self.training_op, self.loss], feed_dict={self.input_ph: input_batch, self.target_ph: target_batch, self.state_ph: initial_state})
        return final_state_, final_loss
# main
with tf.Session() as sess:
    if not tf.gfile.Exists(checkpoint_dir):
        tf.gfile.MakeDirs(checkpoint_dir)
    batch_stride = num_packed_data // B
    # make model
    model = Model()
    saver = tf.train.Saver()
    # always initialize
    init = tf.global_variables_initializer()
    init.run()
    # restore if necessary
    isRestore = False
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt:
        isRestore = True
        last_model = ckpt.model_checkpoint_path
        print("Loading " + last_model)
        saver.restore(sess, last_model)
    # set initial step
    step = tf.train.global_step(sess, model.global_step) + 1
    print("start step = {0}".format(step))
    # fetch initial state
    state = sess.run(model.initial_state, feed_dict={model.isRestore: isRestore})
    print("Initial state: {0}".format(state))
    while True:
        # prepare batch data
        idx = [(step + x * batch_stride) % num_packed_data for x in range(0, B)]
        input_batch = input_all[idx]
        target_batch = target_all[idx]
        state, last_loss = model.train_batch(sess, input_batch, target_batch, state)
        if step % 20 == 0:
            print('step {0}: loss = {1:.3f} (perplexity = {2})'.format(step, last_loss, math.exp(last_loss)))
        if step % 200 == 0:
            saved_file = saver.save(sess, os.path.join(checkpoint_dir, "model.ckpt"), global_step=step)
            print("Saved to " + saved_file)
            print("Last state: {0}".format(model.state.eval()))
            break
        step = step + 1
The problem is solved. It had nothing to do with the RNN or with TensorFlow.
I changed
chars = list(set(data))
to
chars = sorted(set(data))
and now it works.
This is because Python uses a randomized hash function to build sets, and every time Python restarts, 'chars' ends up in a different order.
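As a small illustration of the fix (my own sketch, not part of the original post): building the vocabulary with sorted() makes the character-to-index mapping identical across interpreter runs, so the restored embedding and softmax rows keep lining up with the same characters.
data = open('ptb.train.txt', 'r').read()

chars_unstable = list(set(data))  # iteration order depends on the per-run string hash seed
chars_stable = sorted(set(data))  # deterministic order on every run

char_to_ix = {ch: i for i, ch in enumerate(chars_stable)}
ix_to_char = {i: ch for i, ch in enumerate(chars_stable)}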

TensorFlow RNN: Perplexity per epoch remains constant

I am training an RNN-based language model using TensorFlow. The model is very similar to the PTB model example in the TF tutorials section. However, when I attempt to train the model on my own data, the perplexity of the model does not go down; it remains constant throughout multiple epochs. Could anyone let me know what I might be doing wrong?
I have a feeling that I am not handling the targets properly, but the gist of my code for the targets is:
def batcher(batch_size, unroll_steps, data, pad):
    print(len(data))
    batches = len(data) / batch_size
    inp = []
    target = []
    for i in range(batches):
        #print(len(data[i*batch_size:(i+1)*batch_size]))
        x = data[i*batch_size:(i+1)*batch_size]
        y = [ line[1:] + [pad] for line in x ]
        yield (x, y)
That is, I just shift the data by 1 and use that as the target for the next word in a sentence.
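To make the shift concrete, here is a tiny worked example (my own, assuming a pad id of 0):
pad = 0
x = [[5, 7, 9, 11],
     [2, 4, 6, 8]]                       # one batch of input word ids
y = [line[1:] + [pad] for line in x]     # [[7, 9, 11, 0], [4, 6, 8, 0]]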
The training script and model (class) are seen below
Training script (excerpt):
def train(session, model, folder, batch_size, unroll_steps, epoch):
    word_to_id, id_to_word, train, val = build_inputs(folder, unroll_steps)
    pad = word_to_id['<pad>']
    costs = 0
    iters = 0
    train_size = len(train)
    batch_size = model.batch_size
    batches = train_size / batch_size
    state = session.run(model._initial_state)
    print("Running epoch %d" % epoch)
    for i in range(batches):
        fetches = [model.cost, model._final_state, model.logits]
        feed_dict = {}
        x = train[i*batch_size:(i+1)*batch_size]
        y = [ line[1:] + [pad] for line in x ]
        feed_dict[model.input] = x
        feed_dict[model.targets] = y
        feed_dict[model._initial_state] = state
        #print("Cell-state complete - Running")
        cost, state, logits = session.run(fetches, feed_dict)
        #print("Single Run complete")
        costs += cost
        iters += model.unroll_steps
    print("\tEpoch %d: Perplexity is %f" % (epoch, np.exp(costs/iters)))
    return np.exp(costs/iters)
Model:
import tensorflow as tf

class LM(object):
    def __init__(self, train, max_gradient, batch_size, unroll_steps, vocab, size, layers, learning_rate, init, prob):
        self.batch_size = batch_size
        self.max_gradient = max_gradient
        self.layers = layers
        self.learning_rate = learning_rate
        self.unroll_steps = unroll_steps
        self.init = init
        #with tf.name_scope("Paramters"):
        with tf.device('/gpu:0'), tf.name_scope("Input"):
            self.input = tf.placeholder(tf.int64, shape=[batch_size, unroll_steps], name="input")
            self.targets = tf.placeholder(tf.int64, shape=[batch_size, unroll_steps], name="targets")
            #self.init = tf.placeholder(tf.float32, shape=[], name="init")
        with tf.device('/gpu:0'), tf.name_scope("Embedding"):
            embedding = tf.Variable(tf.random_uniform([vocab, size], -self.init, self.init), dtype=tf.float32, name="embedding")
            embedded_input = tf.nn.embedding_lookup(embedding, self.input, name="embedded_input")
        with tf.device('/gpu:0'), tf.name_scope("RNN"), tf.variable_scope(tf.get_variable_scope(), reuse=False) as scope:
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)
            if train and prob < 1.0:
                lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=prob)
            cell = tf.contrib.rnn.MultiRNNCell([lstm_cell for _ in range(layers)], state_is_tuple=True)
            self._initial_state = cell.zero_state(batch_size, tf.float32)
            outputs = []
            state = self._initial_state
            for step in range(unroll_steps):
                if step > 0: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(embedded_input[:, step, :], state)
                outputs.append(cell_output)
        with tf.device('/gpu:0'), tf.name_scope("Cost"), tf.variable_scope(tf.get_variable_scope(), reuse=False) as scope:
            output = tf.reshape(tf.concat(outputs, 1), [-1, size])
            softmax_w = tf.get_variable("softmax_w", [size, vocab], dtype=tf.float32)
            softmax_b = tf.get_variable("softmax_b", [vocab], dtype=tf.float32)
            logits = tf.matmul(output, softmax_w) + softmax_b
            losses = []
            for logit, target in zip([logits], [tf.reshape(self.targets, [-1])]):
                target = tf.reshape(target, [-1])
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=target)
                losses.append(loss)
            self.cost = tf.reduce_sum(losses) / batch_size
            self._final_state = state
            self.logits = logits
            scope.reuse_variables()
        if not train:
            return
        with tf.device('/gpu:0'), tf.name_scope("Train"), tf.variable_scope(tf.get_variable_scope(), reuse=False):
            train_variables = tf.trainable_variables()
            gradients, _ = tf.clip_by_global_norm(tf.gradients(self.cost, train_variables), self.max_gradient)
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
            self.training = optimizer.apply_gradients(zip(gradients, train_variables))
            tf.get_variable_scope().reuse_variables()

Reusing Variables of an LSTM in TensorFlow

I'm trying to make an RNN using LSTM.
I made an LSTM model, followed by two DNN layers and one regression output layer.
I trained on my data, and the final training loss became about 0.009.
However, when I applied the model to test data, the loss became about 0.5.
Since the first epoch's training loss is also about 0.5, I think the trained variables are not being used in the test model.
The only difference between the training and test models is the batch size:
Training batch = 100~200, test batch size = 1.
In the main function I make the LSTM instances.
In the LSTM initializer, the model is built.
def __init__(self, config, train_model=None):
    self.sess = sess = tf.Session()
    self.num_steps = num_steps = config.num_steps
    self.lstm_size = lstm_size = config.lstm_size
    self.num_features = num_features = config.num_features
    self.num_layers = num_layers = config.num_layers
    self.num_hiddens = num_hiddens = config.num_hiddens
    self.batch_size = batch_size = config.batch_size
    self.train = train = config.train
    self.epoch = config.epoch
    self.learning_rate = learning_rate = config.learning_rate
    with tf.variable_scope('model') as scope:
        self.lstm_cell = lstm_cell = tf.nn.rnn_cell.LSTMCell(lstm_size, initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.cell = cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers)
    with tf.name_scope('placeholders'):
        self.x = tf.placeholder(tf.float32, [self.batch_size, num_steps, num_features],
                                name='input-x')
        self.y = tf.placeholder(tf.float32, [self.batch_size, num_features], name='input-y')
        self.init_state = cell.zero_state(self.batch_size, tf.float32)
    with tf.variable_scope('model'):
        self.W1 = tf.Variable(tf.truncated_normal([lstm_size*num_steps, num_hiddens], stddev=0.1), name='W1')
        self.b1 = tf.Variable(tf.truncated_normal([num_hiddens], stddev=0.1), name='b1')
        self.W2 = tf.Variable(tf.truncated_normal([num_hiddens, num_hiddens], stddev=0.1), name='W2')
        self.b2 = tf.Variable(tf.truncated_normal([num_hiddens], stddev=0.1), name='b2')
        self.W3 = tf.Variable(tf.truncated_normal([num_hiddens, num_features], stddev=0.1), name='W3')
        self.b3 = tf.Variable(tf.truncated_normal([num_features], stddev=0.1), name='b3')
    self.output, self.loss = self.inference()
    tf.initialize_all_variables().run(session=sess)
    tf.initialize_variables([self.b2]).run(session=sess)
    if train_model == None:
        self.train_step = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)
Using the above LSTM __init__, the two LSTM instances below are made.
with tf.variable_scope("model",reuse=None):
train_model = LSTM(main_config)
with tf.variable_scope("model", reuse=True):
predict_model = LSTM(predict_config)
After making the two LSTM instances, I trained train_model and fed the test set into predict_model.
Why are the variables not reused?
The problem is that you should be using tf.get_variable() to create your variables, instead of tf.Variable(), if you want to reuse them within a scope.
Take a look at this tutorial on sharing variables; it will help you understand it better.
Also, you don't need to use a session here, because you don't have to initialize your variables when you are defining the model; the variables should be initialized when you are about to train your model.
The code to reuse the variables is the following:
def __init__(self, config, train_model=None):
    self.num_steps = num_steps = config.num_steps
    self.lstm_size = lstm_size = config.lstm_size
    self.num_features = num_features = config.num_features
    self.num_layers = num_layers = config.num_layers
    self.num_hiddens = num_hiddens = config.num_hiddens
    self.batch_size = batch_size = config.batch_size
    self.train = train = config.train
    self.epoch = config.epoch
    self.learning_rate = learning_rate = config.learning_rate
    with tf.variable_scope('model') as scope:
        self.lstm_cell = lstm_cell = tf.nn.rnn_cell.LSTMCell(lstm_size, initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.cell = cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers)
    with tf.name_scope('placeholders'):
        self.x = tf.placeholder(tf.float32, [self.batch_size, num_steps, num_features],
                                name='input-x')
        self.y = tf.placeholder(tf.float32, [self.batch_size, num_features], name='input-y')
        self.init_state = cell.zero_state(self.batch_size, tf.float32)
    with tf.variable_scope('model'):
        self.W1 = tf.get_variable(initializer=tf.truncated_normal([lstm_size*num_steps, num_hiddens], stddev=0.1), name='W1')
        self.b1 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens], stddev=0.1), name='b1')
        self.W2 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens, num_hiddens], stddev=0.1), name='W2')
        self.b2 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens], stddev=0.1), name='b2')
        self.W3 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens, num_features], stddev=0.1), name='W3')
        self.b3 = tf.get_variable(initializer=tf.truncated_normal([num_features], stddev=0.1), name='b3')
    self.output, self.loss = self.inference()
    if train_model == None:
        self.train_step = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)
To see which variables are created after you create train_model and predict_model, use the following code:
for v in tf.all_variables():
    print(v.name)
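As a smaller, self-contained illustration of the same idea (a sketch of my own, not the poster's model): with tf.get_variable, the second scope opened with reuse=True picks up the existing variable instead of creating a new one.
import tensorflow as tf

def make_dense(x):
    # tf.get_variable participates in variable_scope reuse; tf.Variable never does.
    w = tf.get_variable("w", shape=[3, 4], initializer=tf.truncated_normal_initializer(stddev=0.1))
    return tf.matmul(x, w)

x = tf.placeholder(tf.float32, [None, 3])
with tf.variable_scope("model"):
    train_out = make_dense(x)
with tf.variable_scope("model", reuse=True):
    test_out = make_dense(x)  # reuses the same "model/w" variable

for v in tf.all_variables():
    print(v.name)  # prints 'model/w:0' only once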