Shapes are incompatible at the last records of tf.data.Dataset.from_tensor_slices - tensorflow

I have implemented a seq2seq translation model in TensorFlow 2.0, but during training I get the following error:
ValueError: Shapes (2056, 10, 10000) and (1776, 10, 10000) are incompatible
I have 10000 records in my dataset. The dimensions match from the first record up to record 8224, but for the last 1776 records I get the error above because my batch_size is larger than the number of remaining records: 4 × 2056 = 8224, so the final batch contains only 10000 − 8224 = 1776 examples. Here is my code:
max_seq_len_output = 10
n_words = 10000
batch_size = 2056
model = Model_translation(batch_size = batch_size,embed_size = embed_size,total_words = n_words , dropout_rate = dropout_rate,num_classes = n_words,embedding_matrix = embedding_matrix)
dataset_train = tf.data.Dataset.from_tensor_slices((encoder_input,decoder_input,decoder_output))
dataset_train = dataset_train.shuffle(buffer_size = 1024).batch(batch_size)
loss_object = tf.keras.losses.CategoricalCrossentropy()#used in backprop
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
train_loss = tf.keras.metrics.Mean(name='train_loss')#mean of the losses per observation
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')
##### no #tf.function here
def training(X_1, X_2, y):
    # Create the one-hot encoding here; building it for the whole dataset outside the loop would run out of RAM
    y_numpy = y.numpy()
    Y = np.zeros((batch_size, max_seq_len_output, n_words), dtype='float32')
    for i, d in enumerate(y_numpy):
        for t, word in enumerate(d):
            if word != 0:
                Y[i, t, word] = 1
    Y = tf.convert_to_tensor(Y)
    # predictions
    with tf.GradientTape() as tape:  # trainable variables (created by tf.Variable or tf.compat.v1.get_variable, where trainable=True is the default in both cases) are automatically watched
        predictions = model(X_1, X_2)
        loss = loss_object(Y, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(Y, predictions)
    del Y
    del y_numpy

EPOCHS = 70
for epoch in range(EPOCHS):
    for X_1, X_2, y in dataset_train:
        training(X_1, X_2, y)
    template = 'Epoch {}, Loss: {}, Accuracy: {}'
    print(template.format(epoch + 1, train_loss.result(), train_accuracy.result() * 100))
    # Reset the metrics for the next epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
How can I fix this problem?

One solution would be to drop the remainder during batching with
dataset_train = dataset_train.shuffle(buffer_size = 1024).batch(batch_size, drop_remainder=True)
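
If you would rather keep the final, smaller batch instead of dropping it, another option is to size the one-hot target array from the batch that actually arrives rather than from the fixed batch_size. A minimal sketch of the training function under that assumption (it only helps if Model_translation itself does not depend on a fixed batch size):

def training(X_1, X_2, y):
    # Build the one-hot targets from the actual batch size, so the last,
    # smaller batch no longer clashes with the model output shape.
    current_batch = y.shape[0]
    Y = np.zeros((current_batch, max_seq_len_output, n_words), dtype='float32')
    for i, d in enumerate(y.numpy()):
        for t, word in enumerate(d):
            if word != 0:
                Y[i, t, word] = 1
    Y = tf.convert_to_tensor(Y)
    with tf.GradientTape() as tape:
        predictions = model(X_1, X_2)
        loss = loss_object(Y, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(Y, predictions)

Something like tf.one_hot(y, depth=n_words), with the padding positions masked out afterwards, should also let you drop the Python loop entirely.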

Related

Time-Series Transformer Model trains well but performs worse on Test Data

I have created a transformer model for multivariate time series predictions (many-to-one classification model).
Details about the Dataset
I have hourly data with 8 different features (hour, month, temperature, humidity, wind speed, solar radiation concentration, etc.), and with them I am trying to predict a time sequence: the energy consumption of a building. So my input has the shape X.shape = (8783, 168, 8), i.e. 8783 time sequences, each containing 168 hourly entries/vectors of 8 features. My output has the shape Y.shape = (8783, 1), i.e. 8783 sequences, each containing one output value (the building's energy consumption for the following hour).
Model Details
I took as a starting point an example from the official Keras site. It was created for classification problems; I modified it for my regression problem by changing the activation of the last output layer from sigmoid to relu.
Input shape (train_f) = (8783, 168, 8)
Output shape (train_P) = (8783,1)
When I train the model for 100 epochs it converges in far fewer epochs than my reference models (LSTMs and LSTMs with self-attention). After training, however, when the model is asked to make predictions on the test data, its performance is worse than that of the reference models.
I would be grateful if you could have a look at the code and let me know of potential steps to improve the prediction/test accuracy.
Here is the code:
df_weather = pd.read_excel(r"Downloads\WeatherData.xlsx")
df_energy = pd.read_excel(r"Downloads\Building_energy_consumption_record.xlsx")
visa = pd.concat([df_weather, df_energy], axis = 1)
df_data = visa.loc[:, ~visa.columns.isin(["Time1", "TD", "U", "DR", "FX"])]
msna.bar(df_data)
plt.figure(figsize = (16,6))
sb.heatmap(df_data.corr(), annot = True, linewidths=1, fmt = ".2g", cmap= 'coolwarm')
plt.xticks(rotation='horizontal')  # orientation of the axis tick labels
extract_for_normalization = list(df_data)[1:9]
df_data_float = df_data[extract_for_normalization].astype(float)
from sklearn.model_selection import train_test_split
train_X, test_X = train_test_split(df_data_float, train_size = 0.7, shuffle = False)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_train_X=scaler.fit_transform(train_X)
**Converting train_X into required shape (inputs,sequences, features)**
train_f = [] #features input from training data
train_p = [] # prediction values
#test_q = []
#test_r = []
n_future = 1 #number of days we want to predict into the future
n_past = 168 # no. of time series input features to be considered for training
for val in range(n_past, len(scaled_train_X) - n_future + 1):
    train_f.append(scaled_train_X[val - n_past:val, 0:scaled_train_X.shape[1]])
    train_p.append(scaled_train_X[val + n_future - 1:val + n_future, -1])
train_f, train_p = np.array(train_f), np.array(train_p)
**Transformer Model**
def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
    # Normalization and Attention
    x = layers.LayerNormalization(epsilon=1e-6)(inputs)
    x = layers.MultiHeadAttention(
        key_dim=head_size, num_heads=num_heads, dropout=dropout
    )(x, x)
    x = layers.Dropout(dropout)(x)
    res = x + inputs
    # Feed Forward Part
    x = layers.LayerNormalization(epsilon=1e-6)(res)
    x = layers.Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(x)
    x = layers.Dropout(dropout)(x)
    x = layers.Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
    return x + res
def build_model(
    input_shape,
    head_size,
    num_heads,
    ff_dim,
    num_transformer_blocks,
    mlp_units,
    dropout=0,
    mlp_dropout=0,
):
    inputs = keras.Input(shape=input_shape)
    x = inputs
    for _ in range(num_transformer_blocks):
        x = transformer_encoder(x, head_size, num_heads, ff_dim, dropout)
    x = layers.GlobalAveragePooling1D(data_format="channels_first")(x)
    for dim in mlp_units:
        x = layers.Dense(dim, activation="relu")(x)
        x = layers.Dropout(mlp_dropout)(x)
    outputs = layers.Dense(train_p.shape[1])(x)
    return keras.Model(inputs, outputs)
input_shape = (train_f.shape[1], train_f.shape[2])
model = build_model(
    input_shape,
    head_size=256,
    num_heads=4,
    ff_dim=4,
    num_transformer_blocks=4,
    mlp_units=[128],
    mlp_dropout=0.4,
    dropout=0.25,
)
model.compile(loss=tf.keras.losses.mean_absolute_error,
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
              metrics=["mse"])
model.summary()
history = model.fit(train_f, train_p, epochs=100, batch_size = 32, validation_split = 0.15, verbose = 1)
trainYPredict = model.predict(train_f)
**Inverse transform the prediction and keep the last value(output)**
trainYPredict1 = np.repeat(trainYPredict, scaled_train_X.shape[1], axis = -1)
trainYPredict_actual = scaler.inverse_transform(trainYPredict1)[:, -1]
train_p_actual = np.repeat(train_p, scaled_train_X.shape[1], axis = -1)
train_p_actual1 = scaler.inverse_transform(train_p_actual)[:, -1]
Prediction_mse=mean_squared_error(train_p_actual1 ,trainYPredict_actual)
print("Mean Squared Error of prediction is:", str(Prediction_mse))
Prediction_rmse =sqrt(Prediction_mse)
print("Root Mean Squared Error of prediction is:", str(Prediction_rmse))
prediction_r2=r2_score(train_p_actual1 ,trainYPredict_actual)
print("R2 score of predictions is:", str(prediction_r2))
prediction_mae=mean_absolute_error(train_p_actual1 ,trainYPredict_actual)
print("Mean absolute error of prediction is:", prediction_mae)
**Testing of model**
scaled_test_X = scaler.transform(test_X)
test_q = []
test_r = []
for val in range(n_past, len(scaled_test_X) - n_future + 1):
    test_q.append(scaled_test_X[val - n_past:val, 0:scaled_test_X.shape[1]])
    test_r.append(scaled_test_X[val + n_future - 1:val + n_future, -1])
test_q, test_r = np.array(test_q), np.array(test_r)
testPredict = model.predict(test_q )
The training and validation loss plot is also attached: [image: Training and validation Loss]
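
For completeness, the test predictions can be brought back to the original scale with the same repeat/inverse_transform trick used on the training predictions; a sketch, assuming scaled_test_X has the same column layout as scaled_train_X:

# Repeat the single predicted column across all scaled features,
# inverse-transform, then keep only the last column (the target).
testPredict1 = np.repeat(testPredict, scaled_test_X.shape[1], axis=-1)
testPredict_actual = scaler.inverse_transform(testPredict1)[:, -1]
test_r_actual = scaler.inverse_transform(
    np.repeat(test_r, scaled_test_X.shape[1], axis=-1))[:, -1]
test_rmse = sqrt(mean_squared_error(test_r_actual, testPredict_actual))
print("Test RMSE:", test_rmse)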

MXNET custom symbol loss with gluon

I wrote this code (almost all of it is from a tutorial; I just modified a few lines), and it is not working.
import numpy as np
import mxnet as mx
from mxnet import gluon, nd, autograd
from mxnet.gluon import nn

np.random.seed(42)
mx.random.seed(42)
ctx = mx.gpu()

def data_xform(data):
    """Move channel axis to the beginning, cast to float32, and normalize to [0, 1]."""
    return nd.moveaxis(data, 2, 0).astype('float32') / 255
# prepare data
train_data = mx.gluon.data.vision.MNIST(train=True).transform_first(data_xform)
val_data = mx.gluon.data.vision.MNIST(train=False).transform_first(data_xform)
batch_size = 100
train_loader = mx.gluon.data.DataLoader(train_data, shuffle=True, batch_size=batch_size)
val_loader = mx.gluon.data.DataLoader(val_data, shuffle=False, batch_size=batch_size)
# create network
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
net= gluon.SymbolBlock(outputs=[fc3], inputs=[data])
net.initialize(ctx=ctx)
# create trainer, metric
trainer = gluon.Trainer(
    params=net.collect_params(),
    optimizer='sgd',
    optimizer_params={'learning_rate': 0.1, 'momentum': 0.9, 'wd': 0.00001},
)
metric = mx.metric.Accuracy()
# learn
num_epochs = 10
for epoch in range(num_epochs):
    for inputs, labels in train_loader:
        inputs = inputs.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        with autograd.record():
            outputs = net(inputs)
            # softmax
            exps = nd.exp(outputs - outputs.min(axis=1).reshape((-1, 1)))
            exps = exps / exps.sum(axis=1).reshape((-1, 1))
            # cross entropy
            loss = nd.MakeLoss(-nd.log(exps.pick(labels)))
            #
            #loss = gluon.loss.SoftmaxCrossEntropyLoss()(outputs, labels)
            #print(loss)
        loss.backward()
        metric.update(labels, outputs)
        trainer.step(batch_size=inputs.shape[0])
    name, acc = metric.get()
    print('After epoch {}: {} = {}'.format(epoch + 1, name, acc))
    metric.reset()
If I use gluon.loss.SoftmaxCrossEntropyLoss, this runs well.
When I print the loss in both cases, the output values look the same.
What are the differences?
Thanks in advance.
I am not entirely sure why you subtract outputs.min() when calculating the softmax. The original softmax function doesn't do anything like that - https://en.wikipedia.org/wiki/Softmax_function. If you don't do that, you get a good accuracy value:
# softmax
exps = nd.exp(outputs)
exps = exps / exps.sum(axis=1).reshape((-1, 1))
# cross entropy
loss = nd.MakeLoss(-nd.log(exps.pick(labels)))
I get:
After epoch 1: accuracy = 0.89545
After epoch 2: accuracy = 0.9639
After epoch 3: accuracy = 0.97395
After epoch 4: accuracy = 0.9784
After epoch 5: accuracy = 0.98315
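
As a side note, softmax is mathematically unchanged by subtracting any per-row constant, so the min-subtraction does not alter the probabilities on paper; the usual numerically stable form subtracts the row-wise maximum so that every exponent is at most zero and nd.exp cannot overflow. A sketch of the same loop body written that way:

with autograd.record():
    outputs = net(inputs)
    # numerically stable softmax: shift by the row-wise max
    shifted = outputs - outputs.max(axis=1).reshape((-1, 1))
    exps = nd.exp(shifted)
    probs = exps / exps.sum(axis=1).reshape((-1, 1))
    # cross entropy on the probability of the true class
    loss = nd.MakeLoss(-nd.log(probs.pick(labels)))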

How to find accuracy for logistic regression and gradient descent with training and validation data sets?

I am trying to implement logistic regression with gradient descent on the notMNIST dataset. This is my code thus far, which parses the data and plots the accuracy against the epochs. I have done my training in 7 mini batches of 500 each. There are a total of 5000 iterations and therefore 5000/7 epochs.
My goal is to find the accuracy after each epoch and plot it against the epoch. And I want to do the same with the average loss at each epoch. I want to do this for the validation points.
This is the loss function I am implementing.
However, for some reason, when I try to calculate the accuracy I always get 100%, which doesn't make sense, since I learn the weights on the training set and then apply them to the validation set, so the algorithm cannot be correct 100% of the time. Also, when I plot the losses I get a linear function, which doesn't make sense either.
Does anyone have ideas about what I am doing wrong? Any help would be appreciated!
#implement logistic regression
#logistic regression prediction function is y = sigmoid(W^Tx + b)
#train the logistic regression model using SGD and mini batch size B = 500 on the two-class notNMIST dataset
#how to train the dataset:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
##############Constants##################################
BATCH_SIZE = 500;
NUM_BATCHES = 7;
NUM_ITERATIONS = 5000;
LEARNING_RATE = [0.005]#0.001, 0.0001];
PIXEL_SIZE = 784; #28x28
NUM_TRAINING_POINTS = 3500;
NUM_VALID_POINTS = 100;
###############Extracting data############################
with np.load("notMNIST.npz") as data:
    Data, Target = data["images"], data["labels"]
    posClass = 2
    negClass = 9
    dataIndx = (Target==posClass) + (Target==negClass)
    Data = Data[dataIndx]/255.
    Target = Target[dataIndx].reshape(-1, 1)
    Target[Target==posClass] = 1
    Target[Target==negClass] = 0
    np.random.seed(521)
    randIndx = np.arange(len(Data))
    np.random.shuffle(randIndx)
    Data, Target = Data[randIndx], Target[randIndx]
    trainData, trainTarget = Data[:3500], Target[:3500]
    validData, validTarget = Data[3500:3600], Target[3500:3600]
    testData, testTarget = Data[3600:], Target[3600:]
################Manipulating Data##########################
trainX = np.reshape(trainData, (NUM_TRAINING_POINTS, PIXEL_SIZE));
validX = np.reshape(validData, (NUM_VALID_POINTS, PIXEL_SIZE))
batchesX = np.array(np.split(trainX, NUM_BATCHES));
batchesY = np.array(np.split(trainTarget, NUM_BATCHES));
################Defining variables########################
loss_Values = [[0 for x in range(NUM_BATCHES)] for y in range(715)]
lr = dict()
epoch_list = []
mean_list = []
accuracy_list = []
x = tf.placeholder(tf.float32, [PIXEL_SIZE, None], name = "input_points") #784 dimensions (28x28 pixels)
W = tf.Variable(tf.truncated_normal(shape=[PIXEL_SIZE,1], stddev=0.5), name='weights')
b = tf.Variable(0.0, name='bias')
y = tf.placeholder(tf.float32, [None,1], name = "target_labels")#target labels
lambda_ = 0.01
##############Calculations###############################
#weight_squared_sum = tf.matmul(tf.transpose(W),W) #find the square of the weight vector
#calculating the bias term
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    weight = W.eval()
    weight_squared_sum = np.linalg.norm(weight)
loss_W = lambda_ / 2 * weight_squared_sum  # find the loss
y_hat = tf.add(tf.matmul(tf.transpose(W), x), b) #based on the sigmoid equation
y_hat = tf.transpose(y_hat)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits = y_hat, labels = y) #sigmoid_cross_entropy_with_logits takes in the actual y and the predicted y
total_loss = tf.add(tf.reduce_mean(cross_entropy,0),loss_W)
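# Note on the loss above: total_loss is the mean sigmoid cross-entropy plus
# lambda_/2 * ||W||, where the norm comes from W's initial value via W.eval()
# and np.linalg.norm. That makes the regularization term a fixed Python
# constant: it shifts the reported loss but contributes no gradient, so the
# optimizer below only ever minimizes the cross-entropy part.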
#############Training######################################
epoch = 0
with tf.Session() as sess:
    epoch = 0
    tf.global_variables_initializer().run()
    for learning_rate in LEARNING_RATE:
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)  # change the learning rate each time
        for i in range(NUM_BATCHES*NUM_ITERATIONS):
            sess.run(train_step, feed_dict={x: np.transpose(batchesX[i%NUM_BATCHES]), y: batchesY[i%NUM_BATCHES]})
            print("i: ", i)
            print("LOSS:")
            print(sess.run(total_loss, feed_dict={x: np.transpose(batchesX[i%NUM_BATCHES]), y: batchesY[i%NUM_BATCHES]}))
            if i % NUM_BATCHES == 0:  # every time we reach 0, a new epoch has started
                loss_Values[epoch][i%NUM_BATCHES] = sess.run(cross_entropy, feed_dict={x: np.transpose(batchesX[i%NUM_BATCHES]), y: batchesY[i%NUM_BATCHES]})
                correct_prediction = tf.equal(y, y_hat)
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                accuracy_val = sess.run(accuracy, feed_dict={x: np.transpose(validX), y: validTarget})
                print("Accuracy: ", accuracy_val)
                accuracy_list.append(accuracy_val)
                epoch = epoch + 1
        lr[learning_rate] = loss_Values
print("Final value")
#for plotting purposes
N = len(loss_Values)
for epoch in range(N):  # find average over all input points in one epoch
    epoch_list.append(epoch)
    row = np.array(loss_Values[epoch])
    mean = np.add.reduce(row) / 3500
    mean_list.append(mean)
epoch_list = np.array(epoch_list)
mean_list = np.array(epoch_list)
accuracy_list = np.array(epoch_list)
plt.figure()
plt.plot(epoch_list, accuracy_list, '-', label = 'Average loss')
plt.show()
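
For reference, a common way to compute accuracy for a sigmoid/logistic model like the one above is to threshold the predicted probability at 0.5 instead of comparing the raw logits y_hat to the labels with tf.equal. A sketch using the graph variables defined above:

# Convert logits to probabilities, threshold at 0.5, compare to the 0/1 labels.
probs = tf.sigmoid(y_hat)
predicted_class = tf.cast(probs > 0.5, tf.float32)
accuracy_op = tf.reduce_mean(tf.cast(tf.equal(predicted_class, y), tf.float32))
# Evaluated inside an active session, e.g.:
# acc = sess.run(accuracy_op, feed_dict={x: np.transpose(validX), y: validTarget})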

CNN converges to same accuracy regardless of hyperparameters, what does this indicate?

I have written tensorflow code based on:
http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
but using precomputed word embeddings from the GoogleNews word2vec 300 dimension model.
I created my own data from the UCML News Aggregator Dataset in which I parsed the content of the news articles and have created my own labels.
Due to the size of the articles I use TF-IDF to filter out the top 120 words per article and embed those into 300 dimensions.
When I run the CNN I created, it converges to a low overall accuracy of around 38%, regardless of the hyperparameters.
Hyperparameters changed:
Various filter sizes:
I've tried a single filter of 1,2,3
Combinations of filters [3,4,5], [1,3,4]
Learning Rate:
I've varied this from very low to very high, very low doesn't converge to 38% but anything between 0.0001 and 0.4 does.
Batch Size:
Tried many ranges between 5 and 100.
Weight and Bias Initialization:
Set stddev of weights between 0.4 and 0.01.
Set bias initial values between 0 and 0.1.
Tried using the xavier initializer for the conv2d weights.
Dataset Size:
I have only tried on two partial data sets, one with 15 000 training data, and the other on the 5000 test data. In total I have 263 000 data to train on. There is no accuracy difference whether trained and evaluated on the 15 000 training data or by using the 5000 test data as the training data (to save testing time).
I've run successful classifications on the 15 000 / 5000 split using a feed-forward network with a BoW input (93% accurate), TF-IDF with SVM (92%), and TF-IDF with Naive Bayes (91.5%). So I don't think it is the data.
What does this imply? Is the model just a poor model for this task? Is there an error in my work?
I suspect my do_eval function is incorrect for evaluating the accuracy/loss over an epoch of the data:
def do_eval(data_set,
            label_set,
            batch_size):
    """
    Runs one evaluation against the full epoch of data.
    data_set: The set of embeddings to eval
    label_set: the set of labels to eval
    """
    # And run one epoch of eval.
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = len(label_set) // batch_size
    num_examples = steps_per_epoch * batch_size
    totalLoss = 0
    # Need to compute eval accuracy
    for evalStep in xrange(steps_per_epoch):
        input_batch, label_batch = nextBatch(data_set, labels_set, batchSize)
        evalAcc, evalLoss = eval_step(input_batch, label_batch)
        true_count += evalAcc * batchSize
        totalLoss += evalLoss
    precision = float(true_count) / num_examples
    print(' Num examples: %d Num correct: %d Precision # 1: %0.04f' % (num_examples, true_count, precision))
    print("Eval Loss: " + str(totalLoss))
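
One detail worth noting: because nextBatch draws random indices, this loop does not necessarily visit every example exactly once per "epoch", and it reads the globals labels_set and batchSize instead of its own parameters. A sketch of a sequential variant (do_eval_sequential is a hypothetical helper that reuses eval_step from the full code below):

def do_eval_sequential(data_set, label_set, batch_size):
    # Walk the data in order so every example is seen exactly once.
    steps_per_epoch = len(label_set) // batch_size
    num_examples = steps_per_epoch * batch_size
    true_count = 0
    total_loss = 0
    for i in xrange(steps_per_epoch):
        start = i * batch_size
        end = start + batch_size
        input_batch = data_set[start:end].reshape(
            [batch_size, data_set.shape[1], data_set.shape[2], 1])
        label_batch = label_set[start:end]
        eval_acc, eval_loss = eval_step(input_batch, label_batch)
        true_count += eval_acc * batch_size
        total_loss += eval_loss
    precision = float(true_count) / num_examples
    print(' Num examples: %d Precision: %0.04f Eval loss: %0.4f' % (num_examples, precision, total_loss))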
The entire model is as follows:
class TextCNN(object):
    """
    A CNN for text classification
    Uses a convolutional, max-pooling and softmax layer.
    """
    def __init__(
            self, batchSize, numWords, num_classes,
            embedding_size, filter_sizes, num_filters):
        # Set place holders
        self.input_placeholder = tf.placeholder(tf.float32, [batchSize, numWords, embedding_size, 1])
        self.labels = tf.placeholder(tf.int32, [batchSize, num_classes])
        self.pKeep = tf.placeholder(tf.float32)
        # Inference
        '''
        Ready to build conv layers followed by max pooling layers
        Each conv layer produces a different shaped output so need to loop over
        them and create a layer for each and then merge the results
        '''
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                # W: Filter matrix
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.01), name='W')
                b = tf.Variable(tf.constant(0.0, shape=[num_filters]), name="b")
                # Valid padding: Narrow convolution (no edge padded so filter slides over everything)
                # Output size = (input_size (numWords in this case) + 2 * padding (0 in this case) - filter_size) + 1
                conv = tf.nn.conv2d(
                    self.input_placeholder,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity i.e add the bias to Wx + b
                # Where Wx is the conv layer above
                # Then run it through the activation function
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
                # Max-pooling over the outputs
                # Max-pool to control the output size
                # By taking only the best features determined by the filter
                # Ksize is the size of the window of the input tensor
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, numWords - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                # Each pooled outputs a tensor of size
                # [batchSize, 1, 1, num_filters] where num_filters represents the
                # Number of features we wanted pooled
                pooled_outputs.append(pooled)
        # Combine all pooled features
        num_filters_total = num_filters * len(filter_sizes)
        # Concat the pool output along the 3rd (num_filters / feature size) dimension
        self.h_pool = tf.concat(pooled_outputs, 3)
        # Flatten
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # Add drop out to regularize the learning curve / accuracy
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.pKeep)
        # Fully connected output layer
        with tf.name_scope("output"):
            W = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev=0.01), name="W")
            b = tf.Variable(tf.constant(0.0, shape=[num_classes]), name='b')
            self.logits = tf.nn.xw_plus_b(self.h_drop, W, b, name='logits')
            self.predictions = tf.argmax(self.logits, 1, name='predictions')
        # Loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=self.logits, name="xentropy")
            self.loss = tf.reduce_mean(losses)
        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
##################################################################################################################
# Running the training
# Define various parameters for network
batchSize = 100
numWords = 120
embedding_size = 300
num_classes = 4
filter_sizes = [3,4,5] # slide over a the number of words, i.e 3 words, 4 words etc...
num_filters = 126
maxSteps = 5000
initial_learning_rate = 0.001
dropoutRate = 1
data_set = np.load("/home/kevin/Documents/NSERC_2017/articles/classifyDataSet/TestSmaller_CNN_inputMat_0.npy")
labels_set = np.load("Test_NN_target_smaller.npy")
with tf.Graph().as_default():
    sess = tf.Session()
    with sess.as_default():
        cnn = TextCNN(batchSize=batchSize,
                      numWords=numWords,
                      num_classes=num_classes,
                      num_filters=num_filters,
                      embedding_size=embedding_size,
                      filter_sizes=filter_sizes)
        # Define training operation
        # Pick an optimizer, set it's learning rate, and tell it what to minimize
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(initial_learning_rate)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
        # Summaries to save for tensor board
        # Set directory
        out_dir = "/home/kevin/Documents/NSERC_2017/articles/classifyDataSet/tf_logs/CNN_Embedding/"
        # Loss and accuracy summaries
        loss_summary = tf.summary.scalar("loss", cnn.loss)
        acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)
        # Train summaries
        train_summary_op = tf.summary.merge([loss_summary, acc_summary])
        train_summary_dir = out_dir + "train/"
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
        # Test summaries
        test_summary_op = tf.summary.merge([loss_summary, acc_summary])
        test_summary_dir = out_dir + "test/"
        test_summary_write = tf.summary.FileWriter(test_summary_dir, sess.graph)
        # Init all variables
        init = tf.global_variables_initializer()
        sess.run(init)

        ############################################################################################
        def train_step(input_data, labels_data):
            '''
            Single training step
            :param input_data: input
            :param labels_data: labels to train to
            '''
            feed_dict = {
                cnn.input_placeholder: input_data,
                cnn.labels: labels_data,
                cnn.pKeep: dropoutRate
            }
            _, step, summaries, loss, accuracy = sess.run(
                [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                feed_dict=feed_dict)
            train_summary_writer.add_summary(summaries, step)

        ###############################################################################################
        def eval_step(input_data, labels_data, writer=None):
            """
            Evaluates model on a test set
            Single step
            """
            feed_dict = {
                cnn.input_placeholder: input_data,
                cnn.labels: labels_data,
                cnn.pKeep: 1.0
            }
            step, summaries, loss, accuracy = sess.run(
                [global_step, test_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            if writer:
                writer.add_summary(summaries, step)
            return accuracy, loss

        ###############################################################################
        def nextBatch(data_set, labels_set, batchSize):
            '''
            Get the next batch of data
            :param data_set: entire training or test data set
            :param labels_set: entire training or test label set
            :param batchSize: batch size
            :return: a batch of the data and it's corresponding labels
            '''
            # Generate random row indices for the documents
            rand_index = np.random.choice(data_set.shape[0], size=batchSize)
            # Grab the data to give to the feed dicts
            data_batch, labels_batch = data_set[rand_index, :, :], labels_set[rand_index, :]
            # Resize for tensorflow
            data_batch = data_batch.reshape([data_batch.shape[0], data_batch.shape[1], data_batch.shape[2], 1])
            return data_batch, labels_batch

        ################################################################################
        def do_eval(data_set,
                    label_set,
                    batch_size):
            """
            Runs one evaluation against the full epoch of data.
            data_set: The set of embeddings to eval
            label_set: the set of labels to eval
            """
            # And run one epoch of eval.
            true_count = 0  # Counts the number of correct predictions.
            steps_per_epoch = len(label_set) // batch_size
            num_examples = steps_per_epoch * batch_size
            totalLoss = 0
            # Need to compute eval accuracy
            for evalStep in xrange(steps_per_epoch):
                input_batch, label_batch = nextBatch(data_set, labels_set, batchSize)
                evalAcc, evalLoss = eval_step(input_batch, label_batch)
                true_count += evalAcc * batchSize
                totalLoss += evalLoss
            precision = float(true_count) / num_examples
            print(' Num examples: %d Num correct: %d Precision # 1: %0.04f' % (num_examples, true_count, precision))
            print("Eval Loss: " + str(totalLoss))

        ######################################################################################################
        # Training Loop
        for step in range(maxSteps):
            input_batch, label_batch = nextBatch(data_set, labels_set, batchSize)
            train_step(input_batch, label_batch)
            # Evaluate over the entire data set on last eval
            if step % 100 == 0:
                print "On Step : " + str(step) + " of " + str(maxSteps)
                do_eval(data_set, labels_set, batchSize)
The embedding is done before the model:
def createInputEmbeddedMatrix(corpusPath, maxWords, svName):
    # Create a [docNum, Words per Art, Embedding Size] matrix to fill
    genDocsPath = "gen_docs_classifyData_smallerTest_TFIDF.npy"
    # corpus = "newsCorpus_word2vec_All_Corpus.mm"
    dictPath = 'news_word2vec_smallerDict.dict'
    tf_idf_path = "news_tfIdf_word2vec_All.tfidf_model"
    gen_docs = np.load(genDocsPath)
    dictionary = gensim.corpora.dictionary.Dictionary.load(dictPath)
    tf_idf = gensim.models.tfidfmodel.TfidfModel.load(tf_idf_path)
    corpus = corpora.MmCorpus(corpusPath)
    numOfDocs = len(corpus)
    embedding_size = 300
    id2embedding = np.load("smallerID2embedding.npy").item()
    # Need to process in batches as takes up a ton of memory
    step = 5000
    totalSteps = int(np.ceil(numOfDocs / step))
    for i in range(totalSteps):
        # inputMatrix = scipy.sparse.csr_matrix([step,maxWords,embedding_size])
        inputMatrix = np.zeros([step, maxWords, embedding_size])
        start = i * step
        end = start + step
        for docNum in range(start, end):
            print "On docNum " + str(docNum) + " of " + str(numOfDocs)
            # Extract the top N words
            topWords, wordVal = tf_idfTopWords(docNum, gen_docs, dictionary, tf_idf, maxWords)
            # doc = corpus[docNum]
            # Need to track word dex and doc dex seperate
            # Doc dex because of the batch processing
            wordDex = 0
            docDex = 0
            for wordID in wordVal:
                inputMatrix[docDex, wordDex, :] = id2embedding[wordID]
                wordDex += 1
            docDex += 1
        # Save the batch of input data
        # scipy.sparse.save_npz(svName + "_%d" % i, inputMatrix)
        np.save(svName + "_%d.npy" % i, inputMatrix)
#####################################################################################
Turns out my error was in the creation of the input matrix.
for i in range(totalSteps):
    # inputMatrix = scipy.sparse.csr_matrix([step,maxWords,embedding_size])
    inputMatrix = np.zeros([step, maxWords, embedding_size])
    start = i * step
    end = start + step
    for docNum in range(start, end):
        print "On docNum " + str(docNum) + " of " + str(numOfDocs)
        # Extract the top N words
        topWords, wordVal = tf_idfTopWords(docNum, gen_docs, dictionary, tf_idf, maxWords)
        # doc = corpus[docNum]
        # Need to track word dex and doc dex seperate
        # Doc dex because of the batch processing
        wordDex = 0
        docDex = 0
        for wordID in wordVal:
            inputMatrix[docDex, wordDex, :] = id2embedding[wordID]
            wordDex += 1
        docDex += 1
docDex should not have been reset to 0 on each iteration of the inner loop; I was effectively overwriting the first row of my input matrix, so the remaining rows stayed all zeros.
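
A sketch of the corrected batch loop, with docDex reset once per batch of step documents and advanced once per document:

for i in range(totalSteps):
    inputMatrix = np.zeros([step, maxWords, embedding_size])
    start = i * step
    end = start + step
    docDex = 0  # row within the current batch; reset once per batch
    for docNum in range(start, end):
        topWords, wordVal = tf_idfTopWords(docNum, gen_docs, dictionary, tf_idf, maxWords)
        wordDex = 0
        for wordID in wordVal:
            inputMatrix[docDex, wordDex, :] = id2embedding[wordID]
            wordDex += 1
        docDex += 1  # move to the next row for the next document
    np.save(svName + "_%d.npy" % i, inputMatrix)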

TensorFlow - Saver.restore not restoring all parameters

I was training a bidirectional LSTM RNN for nearly 24 hours and, because of oscillation in the error, I decided to decrease the learning rate before letting it continue training. Since the model is saved with Saver.save(sess, file) at every epoch, I terminated the training with the CTC loss having come down to approximately 115.
Now, after restoring the model, the initial error rate I am getting is around 162, which is inconsistent with the error rate I was seeing in the 7th epoch and is roughly what I got in the first epoch. So my impression is that either the restore function is not working, or, if it is working, something else is preventing it from taking effect.
Here is my code:
graph = tf.Graph()
with graph.as_default():
    # Graph creation
    graph_start = time.time()
    seq_inputs = tf.placeholder(tf.float32, shape=[None, batch_size, frame_length], name="sequence_inputs")
    seq_lens = tf.placeholder(shape=[batch_size], dtype=tf.int32)
    seq_inputs = seq_bn(seq_inputs, seq_lens)
    initializer = tf.truncated_normal_initializer(mean=0, stddev=0.1)
    forward = tf.nn.rnn_cell.LSTMCell(num_units=num_units,
                                      num_proj=hidden_size,
                                      use_peepholes=use_peephole,
                                      initializer=initializer,
                                      state_is_tuple=True)
    forward = tf.nn.rnn_cell.MultiRNNCell([forward] * n_layers, state_is_tuple=True)
    backward = tf.nn.rnn_cell.LSTMCell(num_units=num_units,
                                       num_proj=hidden_size,
                                       use_peepholes=use_peephole,
                                       initializer=initializer,
                                       state_is_tuple=True)
    backward = tf.nn.rnn_cell.MultiRNNCell([backward] * n_layers, state_is_tuple=True)
    [fw_out, bw_out], _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=forward, cell_bw=backward, inputs=seq_inputs, time_major=True, dtype=tf.float32, sequence_length=tf.cast(seq_lens, tf.int64))
    # Batch normalize forward output
    mew, var_ = tf.nn.moments(fw_out, axes=[0])
    fw_out = tf.nn.batch_normalization(fw_out, mew, var_, 0.1, 1, 1e-6)
    # fw_out = seq_bn(fw_out,seq_lens)
    # Batch normalize backward output
    mew, var_ = tf.nn.moments(bw_out, axes=[0])
    bw_out = tf.nn.batch_normalization(bw_out, mew, var_, 0.1, 1, 1e-6)
    # bw_out = seq_bn(bw_out,seq_lens)
    # Reshaping forward, and backward outputs for affine transformation
    fw_out = tf.reshape(fw_out, [-1, hidden_size])
    bw_out = tf.reshape(bw_out, [-1, hidden_size])
    # Linear Layer params
    W_fw = tf.Variable(tf.truncated_normal(shape=[hidden_size, n_chars], stddev=np.sqrt(2.0 / (hidden_size))))
    W_bw = tf.Variable(tf.truncated_normal(shape=[hidden_size, n_chars], stddev=np.sqrt(2.0 / (hidden_size))))
    b_out = tf.constant(0.1, shape=[n_chars])
    # Perform an affine transformation
    logits = tf.add(tf.add(tf.matmul(fw_out, W_fw), tf.matmul(bw_out, W_bw)), b_out)
    logits = tf.reshape(logits, [-1, batch_size, n_chars])
    # Use CTC Beam Search Decoder to decode pred string from the prob map
    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_lens)
    # Target params
    indices = tf.placeholder(dtype=tf.int64, shape=[None, 2])
    values = tf.placeholder(dtype=tf.int32, shape=[None])
    shape = tf.placeholder(dtype=tf.int64, shape=[2])
    # Make targets
    targets = tf.SparseTensor(indices, values, shape)
    # Compute Loss
    loss = tf.reduce_mean(tf.nn.ctc_loss(logits, targets, seq_lens))
    # Compute error rate based on edit distance
    predicted = tf.to_int32(decoded[0])
    error_rate = tf.reduce_sum(tf.edit_distance(predicted, targets, normalize=False)) / \
                 tf.to_float(tf.size(targets.values))
    tvars = tf.trainable_variables()
    grad, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), max_grad_norm)
    optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=momentum)
    train_step = optimizer.apply_gradients(zip(grad, tvars))
    graph_end = time.time()
    print("Time elapsed for creating graph: %.3f" % (round(graph_end - graph_start, 3)))
# steps per epoch
start_time = 0
steps = int(np.ceil(len(data_train.files)/batch_size))
loss_tr = []
log_tr = []
loss_vl = []
log_vl = []
err_tr = []
err_vl = []
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
    #sess.run(tf.initialize_all_variables())
    checkpt_path = tf.train.latest_checkpoint(checkpoint_dir)
    print(saver.restore(sess, checkpt_path))
    print("Model restored from 7th epoch, 188th step")
    feed = None
    epoch = None
    step = None
    try:
        for epoch in range(7, epochs+1):
            if epoch == 7:
                initial_step = 189
            else:
                initial_step = 0
            transcript = []
            loss_val = 0
            l_pr = 0
            start_time = time.time()
            for step in range(initial_step, steps):
                train_data, transcript, \
                    targ_indices, targ_values, \
                    targ_shape, n_frames = data_train.next_batch()
                n_frames = np.reshape(n_frames, [-1])
                feed = {seq_inputs: train_data, indices: targ_indices, values: targ_values, shape: targ_shape, seq_lens: n_frames}
                del train_data, targ_indices, targ_values, targ_shape, n_frames
                # Evaluate loss value, decoded transcript, and log probability
                _, loss_val, deco, l_pr, err_rt_tr = sess.run([train_step, loss, decoded, log_prob, error_rate],
                                                              feed_dict=feed)
                del feed
                loss_tr.append(loss_val)
                log_tr.append(l_pr)
                err_tr.append(err_rt_tr)
                # On validation set
                val_data, val_transcript, \
                    targ_indices, targ_values, \
                    targ_shape, n_frames = data_val.next_batch()
                n_frames = np.reshape(n_frames, [-1])
                feed = {seq_inputs: val_data, indices: targ_indices, values: targ_values, shape: targ_shape, seq_lens: n_frames}
                del val_data, val_transcript, targ_indices, targ_values, targ_shape, n_frames
                vl_loss, l_val_pr, err_rt_vl = sess.run([loss, log_prob, error_rate], feed_dict=feed)
                del feed
                loss_vl.append(vl_loss)
                log_vl.append(l_val_pr)
                err_vl.append(err_rt_vl)
                print("epoch %d, step: %d, tr_loss: %.2f, vl_loss: %.2f, tr_err: %.2f, vl_err: %.2f"
                      % (epoch, step, np.mean(loss_tr), np.mean(loss_vl), err_rt_tr, err_rt_vl))
            end_time = time.time()
            elapsed = round(end_time - start_time, 3)
            # On training set
            # Select a random index within batch_size
            sample_index = np.random.randint(0, batch_size)
            # Fetch the target transcript
            actual_str = [data_train.reverse_map[i] for i in transcript[sample_index]]
            # Fetch the decoded path from probability map
            pred_sparse = tf.SparseTensor(deco[0].indices, deco[0].values, deco[0].shape)
            pred_dense = tf.sparse_tensor_to_dense(pred_sparse)
            ans = pred_dense.eval()
            #pred = [data_train.reverse_map[i] for i in ans[sample_index, :]]
            pred = []
            for i in ans[sample_index, :]:
                if i == n_chars-1:
                    pred.append(data_train.reverse_map[0])
                else:
                    pred.append(data_train.reverse_map[i])
            print("time_elapsed for 200 steps: %.3f, " % (elapsed))
            if epoch % 2 == 0:
                print("Sample mini-batch results: \n" \
                      "predicted string: ", np.array(pred))
                print("actual string: ", np.array(actual_str))
            print("On training set, the loss: %.2f, log_pr: %.3f, error rate %.3f:" % (loss_val, np.mean(l_pr), err_rt_tr))
            print("On validation set, the loss: %.2f, log_pr: %.3f, error rate: %.3f" % (vl_loss, np.mean(l_val_pr), err_rt_vl))
            # Save the trainable parameters after the end of an epoch
            if epoch > 7:
                path = saver.save(sess, 'model_%d' % epoch)
                print("Session saved at: %s" % path)
            np.save(results_fn, np.array([loss_tr, log_tr, loss_vl, log_vl, err_tr, err_vl], dtype=np.object))
    except (KeyboardInterrupt, SystemExit, Exception), e:
        print("Error/Interruption: %s" % str(e))
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print("Line no: %d" % exc_tb.tb_lineno)
        if epoch > 7:
            print("Saving model: %s" % saver.save(sess, 'Last.cpkt'))
        print("Current batch: %d" % data_train.b_id)
        print("Current epoch: %d" % epoch)
        print("Current step: %d" % step)
        np.save(results_fn, np.array([loss_tr, log_tr, loss_vl, log_vl, err_tr, err_vl], dtype=np.object))
        print("Closing TF Session...")
        sess.close()
        print("Terminating Program...")
        sys.exit(0)
I think you need to re-initialize your accumulators for each epoch, so these lines must be moved to the beginning of the epoch loop body:
loss_tr = []
log_tr = []
loss_vl = []
log_vl = []
err_tr = []
err_vl = []
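
In other words, a sketch of the placement inside the epoch loop from the code above:

for epoch in range(7, epochs + 1):
    # Re-initialize the per-epoch accumulators here, so np.mean(loss_tr) and
    # friends reflect only the current epoch rather than everything so far.
    loss_tr = []
    log_tr = []
    loss_vl = []
    log_vl = []
    err_tr = []
    err_vl = []
    # ... rest of the epoch exactly as before ...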