I am trying to train a neural network on the Poker Hand Dataset (10 classes). I tried to adapt the MNIST example to fit this data. However, the accuracy of my program is always stuck at about 50%, which is frustrating. How can I improve the accuracy?
import time

import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split

def init_weights(shape):
    """ Weight initialization """
    weights = tf.random_normal(shape, stddev=0.1)
    return tf.Variable(weights)

def forwardprop(X, weights, biases):
    """
    Forward-propagation.
    IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
    """
    h = tf.nn.sigmoid(tf.add(tf.matmul(X, weights['w_1']), biases['b_1']))  # The \sigma function
    yhat = tf.add(tf.matmul(h, weights['w_2']), biases['b_2'])              # The \varphi function
    return yhat

def get_data(filename, targetname="target", idname="", test_size=0.10, random_state=200):
    # read data from csv
    df = pd.read_csv(filename)
    data = pd.DataFrame(df.ix[:, df.columns != targetname])
    if idname != str(""):
        df = df.drop(idname, 1)
        data = pd.DataFrame(df.ix[:, df.columns != targetname])
    data = pd.get_dummies(data)
    all_X = data.as_matrix()
    target = df[targetname]
    target = pd.factorize(target)[0]
    # Convert target into one-hot vectors
    num_labels = len(np.unique(target))
    all_Y = np.eye(num_labels)[target]  # One-liner trick!
    return train_test_split(all_X, all_Y, test_size=test_size, random_state=random_state)
def main():
    start_time = time.time()
    train_X, test_X, train_y, test_y = get_data(filename='./data/poker-train.csv', targetname="class")
    # customized for this dataset (or any large dataset); must be chosen per the data, need to find some generic way
    # for small datasets: batch size can be 1 (for more accuracy)
    # for large ones: somewhere around 50-80; a batch size of 1 is very slow, 50-80 trades accuracy for time
    learning_rate = 0.01
    training_epochs = 100
    batch_size = 1

    # Layer sizes
    x_size = train_X.shape[1]  # Number of input nodes
    h_size = train_X.shape[1]  # Number of hidden nodes
    y_size = train_y.shape[1]  # Number of outcomes

    # Symbols
    X = tf.placeholder("float", shape=[None, x_size])
    y = tf.placeholder("float", shape=[None, y_size])

    # Weight initializations
    weights = {
        'w_1': init_weights((x_size, h_size)),
        'w_2': init_weights((h_size, y_size))
    }
    # Bias initializations
    biases = {
        'b_1': init_weights([h_size]),
        'b_2': init_weights([y_size])
    }

    # Forward propagation
    yhat = forwardprop(X, weights, biases)
    predict = tf.argmax(yhat, axis=1)

    # Backward propagation
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
    updates = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

    # Run SGD
    init = tf.global_variables_initializer()
    total_batch = int(train_X.shape[0] / batch_size)

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(training_epochs):
            beg_i = 0
            # Loop over all batches
            for i in range(total_batch):
                end_i = beg_i + batch_size
                if end_i > train_X.shape[0]:
                    end_i = train_X.shape[0]
                batch_x, batch_y = train_X[beg_i:end_i, :], train_y[beg_i:end_i, :]
                beg_i = beg_i + batch_size
                sess.run(updates, feed_dict={X: batch_x, y: batch_y})
            train_accuracy = np.mean(np.argmax(train_y, axis=1) == sess.run(predict, feed_dict={X: train_X, y: train_y}))
            test_accuracy = np.mean(np.argmax(test_y, axis=1) == sess.run(predict, feed_dict={X: test_X, y: test_y}))
            print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
                  % (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))

        # # Test model
        # correct_prediction = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1))
        # # Calculate accuracy
        # accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # print("Accuracy:", accuracy.eval({X: test_X, y: test_y}))

    print("Total time of execution: ", time.time() - start_time)

if __name__ == '__main__':
    main()
The output is:
Epoch = 1, train accuracy = 50.13%, test accuracy = 50.20%
Epoch = 100, train accuracy = 55.77%, test accuracy = 55.30%
I then modified the above as follows:

batch_size = 50        # was 1
training_epochs = int(train_X.shape[0] / batch_size)
# Layer sizes
x_size = train_X.shape[1]  # Number of input nodes
h_size = 100               # was train_X.shape[1]; Number of hidden nodes
y_size = train_y.shape[1]  # Number of outcomes

and now get:

Epoch = 1, train accuracy = 49.98%, test accuracy = 50.11%
Epoch = 500, train accuracy = 90.90%, test accuracy = 90.78%
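For what it's worth, one thing the training loop above never does is shuffle the training data between epochs, so every epoch visits the rows in exactly the same order. A minimal sketch of per-epoch shuffling, assuming the same train_X, train_y, batch_size, total_batch, updates and sess as in main():

for epoch in range(training_epochs):
    perm = np.random.permutation(train_X.shape[0])      # new order every epoch
    train_X, train_y = train_X[perm], train_y[perm]
    for i in range(total_batch):
        beg_i = i * batch_size
        end_i = min(beg_i + batch_size, train_X.shape[0])
        sess.run(updates, feed_dict={X: train_X[beg_i:end_i], y: train_y[beg_i:end_i]})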
Related
It is a neural network with 12 fully connected layers and 2 CNN layers. I have samples to train it and want to save the model during the training process. But when I try to use the saved model to predict on new samples, I find that the model did not save all of the variables.
Here is part of my model-saving code:
W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3, n_neurons_4]))
bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))
hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3, W_hidden_4), bias_hidden_4))

# Output layer: Variables for output weights and biases
W_out = tf.Variable(weight_initializer([n_neurons_6, n_rot]), name="W_out")
bias_out = tf.Variable(bias_initializer([n_rot]), name="bias_out")
out = tf.add(tf.matmul(hiddens['hidden_14'], W_out), bias_out, name="out")

# Cost function
# tf.reduce_mean: compute the average value
mse = tf.reduce_mean(tf.squared_difference(out, Y))
opt = tf.train.AdamOptimizer().minimize(mse)

# Run initializer
net.run(tf.global_variables_initializer())

for e in range(epochs):
    print(str(e) + ":")
    # Shuffle training data
    shuffle_indices = np.random.permutation(np.arange(len(y_train)))
    X_train = X_train[shuffle_indices]
    y_train = y_train[shuffle_indices]

    # Minibatch training
    for i in range(0, len(y_train) // batch_size):
        start = i * batch_size
        batch_x = X_train[start:start + batch_size]
        batch_y = y_train[start:start + batch_size]
        # Run optimizer with batch
        net.run(opt, feed_dict={X: batch_x, Y: batch_y})

        # Show progress
        if np.mod(i, 50) == 0:
            # Prediction
            pred = net.run(out, feed_dict={X: X_test})
            mse_final = net.run(mse, feed_dict={X: batch_x, Y: batch_y})
            print(mse_final)

    if e % 50 == 0:
        model_path = "/home/student/fulldata/src/fc_bigpara/model_" + str(e/50)
        save_path = saver.save(net, model_path)
And the following is my restore code:
X_test = np.loadtxt('test_x.txt')
sess = tf.Session()

# First let's load the meta graph and restore the weights
saver = tf.train.import_meta_graph('/home/student/fulldata/src/old_model/fc_bigpara_14/model_19.0.meta')
#all_vars = tf.trainable_variables()
#for v in all_vars:
#    print(v.name)
#    print v.name, v.eval(self.sess)
saver.restore(sess, "/home/student/fulldata/src/old_model/fc_bigpara_14/model_19.0")

all_vars = tf.trainable_variables()
for v in all_vars:
    #print(v.name, v.eval(sess))
    print(v.name)
    print(v.shape)

# Now, let's access and create placeholder variables and
# create a feed-dict to feed new data
graph = tf.get_default_graph()
X = tf.placeholder(dtype=tf.float32, shape=[None, 3])
Y = tf.placeholder(dtype=tf.float32, shape=[None, 6])
out = graph.get_tensor_by_name("Variable_25:0")

#todo
with open("result.txt", "w") as f:
    #for i in range(0, len(X_test)):
    #    start = i * batch_size
    #    batch_x = X_test[start:start + batch_size]
    #    batch_x = X_test[i]
    feed_dict = {X: X_test}
    result = sess.run(out, feed_dict)
    #print(result.shape)
I can't find the tensor "out" among the model's variables, even though I added name="out", and it doesn't work. So I can't run the following line:
result = sess.run(out, feed_dict)
How can I modify my code to fix this bug?
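Not a definitive fix, but one thing to note: out is the output of a tf.add operation, not a tf.Variable, so it will never show up in tf.trainable_variables(). After importing the meta graph you can fetch it by its tensor name instead, and you should fetch the original placeholders from the restored graph rather than creating new ones. A minimal sketch, assuming the checkpoint was written after name="out" was added and that the input placeholder was given a known name when the graph was built ("X" here is a hypothetical name, use whatever your graph actually calls it):

import numpy as np
import tensorflow as tf

X_test = np.loadtxt('test_x.txt')

sess = tf.Session()
saver = tf.train.import_meta_graph('/home/student/fulldata/src/old_model/fc_bigpara_14/model_19.0.meta')
saver.restore(sess, '/home/student/fulldata/src/old_model/fc_bigpara_14/model_19.0')

graph = tf.get_default_graph()
out = graph.get_tensor_by_name("out:0")   # tensor produced by tf.add(..., name="out")
X = graph.get_tensor_by_name("X:0")       # hypothetical placeholder name -- adjust to your graph

result = sess.run(out, feed_dict={X: X_test})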
I've been trying to solve a prediction/regression problem with TensorFlow, but I'm facing some problems. Let me give you some context before I explain my real problem.
The data I've been playing with is a set of 5 features, let's call them [f1, f2, f3, f4, f5], which together describe a particular phenomenon identified by a real value (the target).
What I've been trying to do is train a Multi-Layer Perceptron that learns the relationship between the features and the target values. In a nutshell, I'd like to predict real values based on what the neural network has seen during the training phase.
I've identified this as a prediction/regression problem and wrote down the following code:
import os
import tensorflow as tf
# loader is my own data-loading module

# picking device to run on
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# Parameters
learning_rate = 0.001
training_epochs = 99999
batch_size = 4096
STDDEV = 0.1

# Network Parameters
n_hidden_1 = 10  # 1st layer number of neurons
n_hidden_2 = 10  # 2nd layer number of neurons
n_hidden_3 = 10  # 3rd layer number of neurons
n_hidden_4 = 10  # 4th layer number of neurons
n_hidden_5 = 10  # 5th layer number of neurons
n_input = 5      # number of features
n_classes = 1    # one target value (float)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# LOADING DATA
data_train = loader.loadDataset(dir_feat, train_path, 'TRAIN', features)
data_test = loader.loadDataset(dir_feat, test_path, 'TEST', features)
valid_period = 5
test_period = 10

def multilayer_perceptron(x, weights, biases):
    # Hidden layers with sigmoid activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.sigmoid(layer_1)
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.sigmoid(layer_2)
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.sigmoid(layer_3)
    layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
    layer_4 = tf.nn.sigmoid(layer_4)
    layer_5 = tf.add(tf.matmul(layer_4, weights['h5']), biases['b5'])
    layer_5 = tf.nn.sigmoid(layer_5)
    # Output layer with linear activation
    out = tf.matmul(layer_5, weights['out']) + biases['out']
    return out

# Store layers' weights & biases
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=STDDEV)),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=STDDEV)),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], stddev=STDDEV)),
    'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4], stddev=STDDEV)),
    'h5': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_5], stddev=STDDEV)),
    'out': tf.Variable(tf.random_normal([n_hidden_5, n_classes], stddev=STDDEV))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'b3': tf.Variable(tf.random_normal([n_hidden_3])),
    'b4': tf.Variable(tf.random_normal([n_hidden_4])),
    'b5': tf.Variable(tf.random_normal([n_hidden_5])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = multilayer_perceptron(x, weights, biases)

def RMSE():
    return tf.sqrt(tf.reduce_mean(tf.square(y - pred)))

cost = RMSE()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(1, training_epochs):
        avg_cost = 0.
        avg_R_square_train = []
        train_dataset = loader.Dataset(data=data_train, batch_size=batch_size, num_feats=n_input)
        total_batch = train_dataset.getNumberBatches()
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = train_dataset.next_batch(update=True)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            c_train = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            # Compute average loss
            avg_cost += c_train / total_batch
        print("Epoch:" + str(epoch) + ", TRAIN_loss = {:.9f}".format(avg_cost))
        # TESTING
        if epoch % test_period == 0:
            c_test = sess.run(cost, feed_dict={x: data_test[0][0], y: data_test[0][1]})
            print("Epoch:" + str(epoch) + ", TEST_loss = {:.9f}".format(c_test))
The problem I've encountered is that the cost on the test set (after some iterations) gets stuck in what looks like a local minimum and doesn't decrease anymore.
Epoch:6697, TRAIN_loss = 2.162182076
Epoch:6698, TRAIN_loss = 2.156500859
Epoch:6699, TRAIN_loss = 2.157814605
Epoch:6700, TRAIN_loss = 2.160744122
Epoch:6700, TEST_loss = 2.301288128
Epoch:6701, TRAIN_loss = 2.139338647
...
Epoch:6709, TRAIN_loss = 2.166410744
Epoch:6710, TRAIN_loss = 2.162357884
Epoch:6710, TEST_loss = 2.301478863
Epoch:6711, TRAIN_loss = 2.143475396
...
Epoch:6719, TRAIN_loss = 2.145476401
Epoch:6720, TRAIN_loss = 2.150237552
Epoch:6720, TEST_loss = 2.301517725
Epoch:6721, TRAIN_loss = 2.151232243
...
Epoch:6729, TRAIN_loss = 2.163080522
Epoch:6730, TRAIN_loss = 2.160523321
Epoch:6730, TEST_loss = 2.301782370
...
Epoch:6739, TRAIN_loss = 2.156920952
Epoch:6740, TRAIN_loss = 2.162290675
Epoch:6740, TEST_loss = 2.301943779
...
I've tried to change several hyper-parameters, such as the number of hidden layers and/or the number of nodes, the learning rate, the batch size, etc., but the situation doesn't change at all. I have also tried other loss functions such as MAE and MSE.
The number of data samples I have is roughly 270,000.
Can someone please suggest how to solve this problem, or give me some useful advice about it?
Thanks in advance.
Davide
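One thing worth checking that is not shown in the code above is whether the 5 input features (and the target) are standardized; with five stacked sigmoid layers, unscaled inputs can saturate the activations, and plain gradient descent then makes very little progress. A minimal sketch, assuming the loader can hand back plain NumPy arrays (train_x, train_t, test_x, test_t are hypothetical names here):

from sklearn.preprocessing import StandardScaler

# Fit the scalers on the training split only, then apply them to both splits.
x_scaler = StandardScaler().fit(train_x)
t_scaler = StandardScaler().fit(train_t.reshape(-1, 1))

train_x_std = x_scaler.transform(train_x)
test_x_std = x_scaler.transform(test_x)
train_t_std = t_scaler.transform(train_t.reshape(-1, 1))
test_t_std = t_scaler.transform(test_t.reshape(-1, 1))

# Predictions made on standardized targets can be mapped back afterwards:
# real_pred = t_scaler.inverse_transform(pred_std)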
I'm trying to implement a 1-hidden-layer NN for a regression problem. The loss function improves for a few iterations, then it gets stuck at a really high error even on very easy data. Could someone help me find the bug? Here is my code:
import tensorflow as tf
import scipy.io as sio
import numpy as np

reuse_weights = 1
n_nodes_hl1 = 10
batch_size = 200
hm_epochs = 20

# load input from matlab
input_training = sio.loadmat('xMat.mat')
input_training = input_training['xMat']
input_test = sio.loadmat('xMat.mat')
input_test = input_test['xMat']

# find number of measurements and input length
n_measurements = input_training.shape[0]
input_length = input_training.shape[1]

# current input
data_y = input_training[:, input_length - 1].astype(float)
data_x = input_training[:, 0:input_length - 1].astype(float)
test_data_y = input_test[:, input_length - 1].astype(float)
test_data_x = input_test[:, 0:input_length - 1].astype(float)

x = tf.placeholder('float32', [None, input_length - 1])
y = tf.placeholder('float32')
# place holder for Dropout algorithm drop probability
keep_prob = tf.placeholder('float32')

def next_batch(data):
    """
    Return a total of `batch_size` samples from the array `data`.
    """
    if len(data.shape) == 2:
        idx = np.arange(0, len(data[:, 0]))  # get all possible indexes
    else:
        idx = np.arange(0, len(data))  # get all possible indexes
    np.random.shuffle(idx)  # shuffle indexes
    idx = idx[0:batch_size]  # use only `batch_size` random indexes
    if len(data.shape) == 2:
        data_shuffle = [data[i, :] for i in idx]  # get list of `batch_size` random samples
    else:
        data_shuffle = [data[i] for i in idx]  # get list of `batch_size` random samples
    data_shuffle = np.asarray(data_shuffle)  # get back numpy array
    return data_shuffle

def neural_network_model(data, weights, biases, keep_prob):
    layer1 = tf.add(tf.matmul(data, weights['h1']), biases['b1'])
    layer1 = tf.nn.sigmoid(layer1)
    output = tf.add(tf.matmul(layer1, weights['out']), biases['out'])
    return output

if reuse_weights:
    weights = {
        'h1': tf.Variable(sio.loadmat('weights_h1.mat')['weights_h1'], name="weights_h1"),
        'out': tf.Variable(sio.loadmat('weights_out.mat')['weights_out'], name="weights_out")
    }
    biases = {
        'b1': tf.Variable(sio.loadmat('biases_b1.mat')['biases_b1'], name="biases_b1"),
        'out': tf.Variable(sio.loadmat('biases_out.mat')['biases_out'], name="biases_out")
    }
else:  # initialize weights
    weights = {
        'h1': tf.Variable(tf.random_normal([input_length - 1, n_nodes_hl1]), name="weights_h1"),
        'out': tf.Variable(tf.random_normal([n_nodes_hl1, 1]), name="weights_out")
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_nodes_hl1]), name="biases_b1"),
        'out': tf.Variable(tf.random_normal([1]), name="biases_out")
    }

def train_neural_network(x):
    prediction = neural_network_model(x, weights, biases, keep_prob)[:, 0]
    cost = tf.reduce_mean(tf.abs(prediction - y))
    optimizer = tf.train.AdamOptimizer()
    opt = optimizer.minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(weights['h1'])
        for epoch in range(hm_epochs):  # training
            epoch_loss = 0
            for _ in range(int(n_measurements / batch_size)):
                _, c, p = sess.run([opt, cost, prediction],
                                   feed_dict={x: next_batch(data_x), y: next_batch(data_y), keep_prob: 1.0})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'Average loss:', epoch_loss / int(n_measurements / batch_size))

        # prediction
        accuracy = tf.reduce_mean(tf.abs(prediction - y))
        # Feed 1.0 for keep_prob during testing
        print("Training data accuracy:", accuracy.eval({x: data_x, y: data_y, keep_prob: 1.0}))
        print("Training data predictions:", prediction.eval({x: data_x[0:5, :], keep_prob: 1.0}))
        print("Training data:", data_y[0:5])
        #print("Test data accuracy:", accuracy.eval({x: test_data_x, y: test_data_y, keep_prob: 1.0}))

        # save numpy arrays
        sio.savemat('weights_h1.mat', {'weights_h1': weights['h1'].eval()})
        sio.savemat('biases_b1.mat', {'biases_b1': biases['b1'].eval()})
        sio.savemat('weights_out.mat', {'weights_out': weights['out'].eval()})
        sio.savemat('biases_out.mat', {'biases_out': biases['out'].eval()})

train_neural_network(x)
Figured it out: the problem was with the data shuffling. The inputs and the responses were shuffled independently (next_batch was called separately for each, with a different random permutation every time), so the input rows fed in each step did not correspond to the response rows.
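A minimal sketch of that fix: draw one set of random indices per batch and use it to slice both the inputs and the responses, so each row of x stays paired with its y (names follow the code above):

def next_batch_pair(data_x, data_y):
    # one permutation shared by inputs and responses
    idx = np.random.permutation(len(data_y))[:batch_size]
    return data_x[idx, :], data_y[idx]

# inside the training loop:
# batch_x, batch_y = next_batch_pair(data_x, data_y)
# _, c, p = sess.run([opt, cost, prediction],
#                    feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})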
I am trying to build a CNN. I have 8 classes in the input samples, with 45 samples in each class, so the total number of input samples is 360. In each class I take the first 20 samples for training and the remaining 25 samples for testing. (My input is a text file whose rows contain my preprocessed data, so I read the rows from the text file and reshape them into 16x12 images.)
I am unable to fix the error in the code
My code:
import numpy as np
import random
import tensorflow as tf

folder = 'D:\\Lab_Project_Files\\TF\\Practice Files\\'
Datainfo = 'dataset_300.txt'
ClassInfo = 'classTrain.txt'

INPUT_WIDTH = 16
IMAGE_HEIGHT = 12
IMAGE_DEPTH = 1
IMAGE_PIXELS = INPUT_WIDTH * IMAGE_HEIGHT  # 192 = 12*16
NUM_CLASSES = 8
STEPS = 500
STEP_VALIDATE = 100
BATCH_SIZE = 5

def load_data(file1, file2, folder):
    filename1 = folder + file1
    filename2 = folder + file2
    # loading the data file
    x_data = np.loadtxt(filename1, unpack=True)
    x_data = np.transpose(x_data)
    # loading the class information of the data loaded
    y_data = np.loadtxt(filename2, unpack=True)
    y_data = np.transpose(y_data)
    # divide the data into test and train data
    x_data_train = x_data[np.r_[0:20, 45:65, 90:110, 135:155, 180:200, 225:245, 270:290, 315:335], :]
    x_data_test = x_data[np.r_[20:45, 65:90, 110:135, 155:180, 200:225, 245:270, 290:315, 335:360], :]
    y_data_train = y_data[np.r_[0:20, 45:65, 90:110, 135:155, 180:200, 225:245, 270:290, 315:335]]
    y_data_test = y_data[np.r_[20:45, 65:90, 110:135, 155:180, 200:225, 245:270, 290:315, 335:360], :]
    return x_data_train, x_data_test, y_data_train, y_data_test

def reshapedata(data_train, data_test):
    data_train = np.reshape(data_train, (len(data_train), INPUT_WIDTH, IMAGE_HEIGHT))
    data_test = np.reshape(data_test, (len(data_test), INPUT_WIDTH, IMAGE_HEIGHT))
    return data_train, data_test

def batchdata(data, label, batchsize):
    # generate random numbers required to batch data
    order_num = random.sample(range(1, len(data)), batchsize)
    data_batch = []
    label_batch = []
    for i in range(len(order_num)):
        data_batch.append(data[order_num[i - 1]])
        label_batch.append(label[order_num[i - 1]])
    return data_batch, label_batch

# CNN trial
def conv_net(x):
    weights = tf.Variable(tf.random_normal([INPUT_WIDTH * IMAGE_HEIGHT * IMAGE_DEPTH, NUM_CLASSES]))
    biases = tf.Variable(tf.random_normal([NUM_CLASSES]))
    out = tf.add(tf.matmul(x, weights), biases)
    return out

sess = tf.Session()
# get file list and labels for training and testing
data_train, data_test, label_train, label_test = load_data(Datainfo, ClassInfo, folder)
data_train, data_test = reshapedata(data_train, data_test)

############################ get files for training ####################################################
image_batch, label_batch = batchdata(data_train, label_train, BATCH_SIZE)
# input/output placeholders
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES])
# create the network
y = conv_net(x)
# loss
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
# train step
train_step = tf.train.AdamOptimizer(1e-3).minimize(cost)

############################## get files for validation ###################################################
image_batch_test, label_batch_test = batchdata(data_test, label_test, BATCH_SIZE)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())

################ CNN Program ##############################
for i in range(STEPS):
    # checking the accuracy in between
    if i % STEP_VALIDATE == 0:
        imgs, lbls = sess.run([image_batch_test, label_batch_test])
        print(sess.run(accuracy, feed_dict={x: imgs, y_: lbls}))
    imgs, lbls = sess.run([image_batch, label_batch])
    sess.run(train_step, feed_dict={x: imgs, y_: lbls})

imgs, lbls = sess.run([image_batch_test, label_batch_test])
print(sess.run(accuracy, feed_dict={x: imgs, y_: lbls}))
The files dataset_300.txt and ClassInfo.txt can be downloaded.
Session.run accepts only a list of tensors or tensor names.
imgs, lbls = sess.run([image_batch_test, label_batch_test])
In the line above, you are passing image_batch_test and label_batch_test, which are NumPy arrays, not tensors. I am not sure what you are trying to achieve with imgs, lbls = sess.run([image_batch_test, label_batch_test]).
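A minimal sketch of what the loop probably intends, assuming batchdata() keeps returning fresh batches and the labels are already one-hot with NUM_CLASSES columns; the batches are fed straight into feed_dict instead of being passed through sess.run, and the 16x12 images are flattened to match the IMAGE_PIXELS placeholder:

for i in range(STEPS):
    if i % STEP_VALIDATE == 0:
        test_imgs, test_lbls = batchdata(data_test, label_test, BATCH_SIZE)
        test_imgs = np.reshape(test_imgs, (BATCH_SIZE, IMAGE_PIXELS))   # flatten 16x12 images
        print(sess.run(accuracy, feed_dict={x: test_imgs, y_: test_lbls}))
    imgs, lbls = batchdata(data_train, label_train, BATCH_SIZE)
    imgs = np.reshape(imgs, (BATCH_SIZE, IMAGE_PIXELS))
    sess.run(train_step, feed_dict={x: imgs, y_: lbls})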
I want to adapt the Recurrent Neural Network from this TensorFlow tutorial (https://github.com/aymericdamien/TensorFlow-Examples/, the RNN example) so that it becomes a denoising autoencoder.
I have 5 time steps, and at each time step the noiseless target is sampled from sin(x), and the noisy input is sin(x) + Gaussian noise.
Now my problem is that the RNN from the example gives me 1 output value for each sequence of inputs, but I want an output for each time step (I want 5 outputs, not 1).
How do I do this? I suspect it may be a matter of redefining the weights and biases, but how?
Here is the code. Many thanks for your help.
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
import numpy as np

# Parameters
learning_rate = 0.0005
training_iters = 1000
batch_size = 3
display_step = 100

# Network Parameters
n_input = 2
n_output = 2
n_steps = 5   # timesteps
n_hidden = 40 # hidden layer num of features

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_steps, n_input])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_output]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_output]))
}

# length of time series to be sampled
N = 1000000
dim_input = 2
x1 = np.zeros(N)
x2 = np.zeros(N)
y1 = np.zeros(N)
y2 = np.zeros(N)

# generate data
for i in range(0, N):
    # clean
    y1[i] = np.math.sin(i)
    y2[i] = np.math.cos(i)
    # noisy
    x1[i] = y1[i] + np.random.normal(loc=0.0, scale=0.05)
    x2[i] = y2[i] + np.random.normal(loc=0.0, scale=0.05)

def next_batch():
    batch = np.empty([batch_size, n_steps, dim_input])
    batch_y = np.empty([batch_size, n_steps, dim_input])
    # for plotting purposes only
    inits = np.empty([batch_size], dtype=int)
    for b in range(0, batch_size):
        # the first one of the batch
        inits[b] = int(np.round(np.random.uniform(low=0, high=N - n_steps - 1)))
        init = inits[b]
        for i in range(0, n_steps):
            # noisy input
            batch[b, i, 0] = x1[init + i]
            batch[b, i, 1] = x2[init + i]
            # target (no noise)
            batch_y[b, i, 0] = y1[init + i]
            batch_y[b, i, 1] = y2[init + i]
    return (batch, batch_y, inits)

def RNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get LSTM cell output
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using the RNN inner loop's last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)

# Define loss and optimizer
# SSE, there must be an easier way to do this
def get_cost(prediction, truth):
    z = 0
    for i in range(0, batch_size):
        z = z + np.square(np.add(prediction[i, :], np.multiply(-1, truth[i, :])))
    z = np.add(z[0], z[1])
    z = np.sum(z)
    return (z)

cost = get_cost(pred, y)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
accuracy = cost

# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reaching max iterations
    while step * batch_size < training_iters:
        print('step ' + str(step))
        batch_x, batch_y, inits = next_batch()
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print(loss)
        step += 1
    print("Optimization Finished!")
If I run this, I get this error message:
ValueError: Shape (?, 5, 2) must have rank 2
This seems fair enough, because the target is 5 steps long and the output is only 1 step. But how do I fix that?
Many thanks.
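One way to get an output at every time step, sticking with the shared weights['out'] layer above, is to apply that linear layer to every element of outputs instead of only outputs[-1], and make the cost compare the full (batch, steps, output) tensor with y. A rough sketch under the same TF 0.x-era API as the question (tf.pack was later renamed tf.stack):

def RNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(0, n_steps, x)
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    # same linear layer applied at every step, then stacked back to (batch, steps, output)
    step_outputs = [tf.matmul(o, weights['out']) + biases['out'] for o in outputs]
    return tf.transpose(tf.pack(step_outputs), [1, 0, 2])

pred = RNN(x, weights, biases)              # shape (batch, n_steps, n_output)
cost = tf.reduce_sum(tf.square(pred - y))   # SSE over all steps, as a graph op

The full code below takes a different route: it lets LSTMCell project to 2 outputs per step itself via num_proj=2 and sums the squared errors over all steps.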
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
import numpy as np
import matplotlib.pyplot as plt

## Denoising autoencoder.

count = 0
# length of time series to be sampled
N = 10000

x1 = np.zeros(N)
x2 = np.zeros(N)
y1 = np.zeros(N)
y2 = np.zeros(N)

batch_size = 30
learning_rate = 0.0005
training_iters = 300000
display_step = 100

# Network Parameters
n_input = 2
n_output = 2
n_steps = 15  # timesteps
n_hidden = 75 # hidden layer num of features

# generate data
for i in range(0, N):
    # clean
    y1[i] = np.math.sin(i)
    y2[i] = np.math.cos(i)
    # noisy
    x1[i] = y1[i] + np.random.normal(loc=0.0, scale=0.1)
    x2[i] = y2[i] + np.random.normal(loc=0.0, scale=0.1)

def next_batch():
    batch = np.empty([batch_size, n_steps, n_input])
    batch_y = np.empty([batch_size, n_steps, n_input])
    # for plotting purposes only
    inits = np.empty([batch_size], dtype=int)
    for b in range(0, batch_size):
        # the first one of the batch
        inits[b] = int(np.round(np.random.uniform(low=0, high=N - n_steps - 1)))
        init = inits[b]
        for i in range(0, n_steps):
            # noisy input
            batch[b, i, 0] = x1[init + i]
            batch[b, i, 1] = x2[init + i]
            # target (no noise)
            batch_y[b, i, 0] = y1[init + i]
            batch_y[b, i, 1] = y2[init + i]
    return (batch, batch_y, inits)

# Parameters
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_steps, n_output])

N_train = N - 500

def RNN(x):
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    # Define an LSTM cell whose projection layer maps the state to 2 outputs per step
    lstm_cell = rnn_cell.LSTMCell(num_units=n_hidden, forget_bias=1.0, num_proj=2)
    # Get LSTM cell output at every time step
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    return outputs

print(x)
pred = RNN(x)

# Define loss and optimizer
def get_cost(prediction, truth):
    #print('pred' + str(prediction))
    # SSE. there must be an easier way than this:
    z = 0
    for step in range(0, n_steps):
        for b in range(0, batch_size):
            for y_dim in range(0, 2):
                d1 = prediction[step][b, y_dim]
                d2 = truth[b, step, y_dim]
                diff = (d1 - d2)
                z = z + diff * diff
    return (z)

cost = get_cost(pred, y)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reaching max iterations
    while step * batch_size < training_iters:
        #print('step ' + str(step))
        batch_x, batch_y, inits = next_batch()
        # Reshape data to get 28 seq of 28 elements
        #batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print(str(step) + ':' + str(loss))
        step += 1
    print("Optimization Finished!")

    batch_size = 1
    test_data, test_label, inits = next_batch()
    #print "Testing Accuracy:", \
    #sess.run(accuracy, feed_dict={x: test_data, y: test_label})
    p2 = sess.run(pred, feed_dict={x: test_data, y: test_label})
    #print('---inits---')
    #print(inits)
    print('---batch---')
    print(test_data)
    print('---truth---')
    print(test_label)
    print('---pred---')
    print(p2)
    c_final = get_cost(p2, test_label)
    print(c_final)
First, we generate some data: a 2-dimensional series of sin(i) and cos(i), with i running from 1 to N. This gives us the variable y. Then we add some Normal noise to this series, and that's x. Then, we train a Recurrent Neural Network to recreate the clean output from the noisy input. In other words, we train the net so that it will output [cos(i), sin(i)] from the input [cos(i)+e1, sin(i)+e2]. This is a plain vanilla denoising autoencoder, except that the data has a time element. Now you can feed new data into the neural net, and it will hopefully remove the noise.