How to implement metric learning using a siamese neural network in TensorFlow - tensorflow

I'm trying to implement metric learning with contrastive loss, as in the Caffe siamese example, and plot the results like in this example:
(source: researchgate.net)
Simple fully connected layers work well in Caffe (the result looks like the picture above), but my TensorFlow version gives a different result.
Could anyone help me find the issue in my code, or suggest how to implement the Caffe example in TensorFlow?
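For reference, the contrastive loss I am trying to reproduce (with label y = 1 for a dissimilar pair and y = 0 for a similar pair, the same convention as in the code below) is, as far as I understand it:

L = \frac{1}{2N} \sum_{n=1}^{N} \left[ (1 - y_n)\, d_n^2 + y_n \, \max(0,\ \mathrm{margin} - d_n)^2 \right], \qquad d_n = \lVert f(x_n^{\mathrm{left}}) - f(x_n^{\mathrm{right}}) \rVert_2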
Here is my code:
# In[1]:
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.examples.tutorials.mnist import input_data
from math import sqrt
import numpy as np
from sklearn.manifold import TSNE
get_ipython().magic('matplotlib inline')
get_ipython().magic('pylab inline')
# In[2]:
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
# In[3]:
learning_rate = 0.00001
training_epochs = 15
batch_size = 100
display_step = 1
logs_path = './tensorflow_logs/mnist_metrics'
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 28*28 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
margin = 1.0
# In[4]:
x_left = tf.placeholder(tf.float32, shape=[None, n_input], name='InputDataLeft')
x_right = tf.placeholder(tf.float32, shape=[None, n_input], name='InputDataRight')
label = tf.placeholder(tf.float32, shape=[None, 1], name='LabelData') # 0 if the same, 1 if different
x_image_left = x_left
x_image_right = x_right
# In[5]:
# def NN(inputs):
# In[6]:
def tfNN(x, weights, biases):
    x = tf.scalar_mul(1.0/256.0, x)
    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    layer_3 = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
    out_layer = tf.add(tf.matmul(layer_3, weights['w4']), biases['b4'])
    return out_layer
# In[7]:
# Store layers weight & bias
weights = {
'w1': tf.Variable(tf.random_uniform([n_input, n_hidden_1], minval=-4*np.sqrt(6.0/(n_input + n_hidden_1)), maxval=4*np.sqrt(6.0/(n_input + n_hidden_1))), name='W1'),
'w2': tf.Variable(tf.random_uniform([n_hidden_1, n_hidden_2], minval=-4*np.sqrt(6.0/(n_hidden_1 + n_hidden_2)), maxval=4*np.sqrt(6.0/(n_hidden_1 + n_hidden_2))), name='W2'),
'w3': tf.Variable(tf.random_uniform([n_hidden_2, n_classes], minval=-4*np.sqrt(6.0/(n_hidden_2 + n_classes)), maxval=4*np.sqrt(6.0/(n_hidden_2 + n_classes))), name='W3'),
'w4': tf.Variable(tf.random_uniform([n_classes, 2], minval=-4*np.sqrt(6.0/(n_classes + 2)), maxval=4*np.sqrt(6.0/(n_classes + 2))), name='W4')
}
biases = {
'b1': tf.Variable(tf.truncated_normal([n_hidden_1]) / sqrt(n_hidden_1), name='b1'),
'b2': tf.Variable(tf.truncated_normal([n_hidden_2]) / sqrt(n_hidden_2), name='b2'),
'b3': tf.Variable(tf.truncated_normal([n_classes]) / sqrt(n_classes), name='b3'),
'b4': tf.Variable(tf.truncated_normal([2]) / sqrt(2), name='b4')
}
# In[8]:
with tf.name_scope('Model'):
    # Model
    pred_left = tfNN(x_image_left, weights, biases)
    pred_right = tfNN(x_image_right, weights, biases)

with tf.name_scope('Loss'):
    # Minimize error using cross entropy
    # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
    d = tf.reduce_sum(tf.square(pred_left - pred_right), 1)
    d_sqrt = tf.sqrt(d)
    loss = label * tf.square(tf.maximum(0.0, margin - d_sqrt)) + (1 - label) * d
    loss = 0.5 * tf.reduce_mean(loss)

with tf.name_scope('AdamOptimizer'):
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# In[9]:
# Initializing the variables
init = tf.global_variables_initializer()
# Create a summary to monitor cost tensor
tf.scalar_summary("loss", loss)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
# In[10]:
# Launch the graph
sess = tf.Session()
sess.run(init)
# op to write logs to Tensorboard
summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
# Training cycle
for epoch in range(training_epochs):
    avg_loss = 0.0
    total_batch = int(mnist.train.num_examples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        left_batch_xs, left_batch_ys = mnist.train.next_batch(batch_size)
        right_batch_xs, right_batch_ys = mnist.train.next_batch(batch_size)
        labels = np.zeros((batch_size, 1))
        for l in range(batch_size):
            if left_batch_ys[l] == right_batch_ys[l]:
                labels[l, 0] = 0.0
            else:
                labels[l, 0] = 1.0
        _, l, summary = sess.run([optimizer, loss, merged_summary_op],
                                 feed_dict={
                                     x_left: left_batch_xs,
                                     x_right: right_batch_xs,
                                     label: labels,
                                 })
        # Write logs at every iteration
        summary_writer.add_summary(summary, epoch * total_batch + i)
        # Compute average loss
        avg_loss += l / total_batch
    # Display logs per epoch step
    if (epoch+1) % display_step == 0:
        print ("Epoch:", '%04d' % (epoch+1), "loss =", "{:.9f}".format(avg_loss))
print ("Optimization Finished!")
print ("Run the command line:\n" "--> tensorboard --logdir=./tensorflow_logs " "\nThen open http://0.0.0.0:6006/ into your web browser")
# In[11]:
# Test model
# Calculate accuracy
test_xs, test_ys = mnist.train.next_batch(5000)
ans = sess.run([pred_left], feed_dict = { x_left: test_xs})
# In[12]:
ans = ans[0]
# In[13]:
#test_ys
# In[14]:
figure(figsize=(10,10))
# scatter(r[:,0], r[:,1], c=[test_ys[x,:].argmax() for x in range(len(test_ys))])
scatter(ans[:,0], ans[:,1], c=test_ys[:])

I found the issue in my contrastive loss implementation: it requires keep_dims=True in the distance calculation. Here is the corrected version:
with tf.name_scope('Loss'):
    d = tf.reduce_sum(tf.square(tf.sub(pred_left, pred_right)), 1, keep_dims=True)
    d_sqrt = tf.sqrt(d)
    loss = label * tf.square(tf.maximum(0.0, margin - d_sqrt)) + (1 - label) * d
    loss = 0.5 * tf.reduce_mean(loss)
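To see why keep_dims matters here: without it, d has shape [batch], while label has shape [batch, 1], so label * d broadcasts to a [batch, batch] matrix and the loss mixes distances from unrelated pairs. A minimal NumPy sketch of the shape behaviour (illustration only, not part of the model):

import numpy as np
batch = 3
label = np.zeros((batch, 1))   # shape (3, 1), like the label placeholder
d_flat = np.ones(batch)        # shape (3,),   like reduce_sum(..., 1) without keep_dims
d_kept = np.ones((batch, 1))   # shape (3, 1), like reduce_sum(..., 1, keep_dims=True)
print((label * d_flat).shape)  # (3, 3) -- unwanted broadcasting across the batch
print((label * d_kept).shape)  # (3, 1) -- element-wise per pair, as intended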
Now I get the correct result:

Related

tensorflow - nn =>accuracy print error

I'm building a neural network with TensorFlow: multiple inputs => linear regression.
I'm not following a TensorFlow example exactly; I just want to get this example working as a sanity check.
(My input data is fruit & water & vegetable, and the output value is a real number (concentration), so I think this example is similar. If you have a better example, please share it, thank you.)
When this code tries to print the accuracy, it throws an error.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import learn
from sklearn.model_selection import train_test_split
boston = learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.6, random_state=42)
total_len = X_train.shape[0]
# Parameters
learning_rate = 0.001
training_epochs = 500
batch_size = 10
display_step = 1
dropout_rate = 0.9
# Network Parameters
n_hidden_1 = 32 # 1st layer number of features
n_hidden_2 = 200 # 2nd layer number of features
n_hidden_3 = 200
n_hidden_4 = 256
n_input = X_train.shape[1]
n_classes = 1
# tf Graph input
x = tf.placeholder("float", [None,13])
y = tf.placeholder("float", [None])
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Hidden layer with RELU activation
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)
    # Hidden layer with RELU activation
    layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
    layer_4 = tf.nn.relu(layer_4)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], 0, 0.1)),
'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_hidden_4, n_classes], 0, 0.1))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
'b3': tf.Variable(tf.random_normal([n_hidden_3], 0, 0.1)),
'b4': tf.Variable(tf.random_normal([n_hidden_4], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(tf.transpose(pred)-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# # Initializing the variables
# init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(total_len/batch_size)
        # Loop over all batches
        for i in range(total_batch-1):
            batch_x = X_train[i*batch_size:(i+1)*batch_size]
            batch_y = Y_train[i*batch_size:(i+1)*batch_size]
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x,
                                                                   y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # sample prediction
        label_value = batch_y
        estimate = p
        err = label_value-estimate
        print ("num batch:", total_batch)
        # Display logs per epoch step
        if epoch % display_step == 0:
            print ("Epoch:", '%04d' % (epoch+1), "cost=", \
                "{:.9f}".format(avg_cost))
            print ("[*]----------------------------")
            for i in range(3):
                print ("label value:", label_value[i], \
                    "estimated value:", estimate[i])
            print ("[*]============================")
    print ("Optimization Finished!")

# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Accuracy:", accuracy.eval({x: X_test, y: Y_test}))
You compute accuracy outside of the session.
Move it under with tf.Session() as sess:.
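A minimal sketch of evaluating on the test split inside the session (note this is an assumption on my part: with n_classes = 1 and y of shape [None], an argmax-based accuracy is not very meaningful for this regression, so the existing squared-error cost is evaluated instead):

# still inside `with tf.Session() as sess:`, after the training loop
test_mse = sess.run(cost, feed_dict={x: X_test, y: Y_test})
print("Test MSE:", test_mse)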

Tensorflow: why does my logistic cost not change?

I am new to TensorFlow and want to do a classification task with self-defined layers. I have built a model that concatenates the outputs of several previous DNN layers into one big tensor and then performs logistic binary classification, like this (but without the RNN):
Model Structure
This is my code:
import tensorflow as tf
import numpy as np
import pandas as pd
from load_data import load_data_from_file
TRAIN_FILE = 'train.csv'
TEST_FILE = 'test.csv'
X_train, y_train = load_data_from_file(TRAIN_FILE)
y_train = y_train[np.newaxis]
y_train = y_train.transpose()
n_x_dnn = X_train.shape[1]
n_hidden_dnn = 50
n_merge_dnn1 = 5 * n_hidden_dnn
n_dnn6 = 50
hm_epochs = 10
batch_size = 50
X = tf.placeholder(tf.float32, [None, n_x_dnn])
y = tf.placeholder(tf.float32, [None, 1])
weights = {
'dnn1': tf.Variable(tf.random_normal([n_x_dnn, n_hidden_dnn])),
'dnn2': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
'dnn3': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
'dnn4': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
'dnn5': tf.Variable(tf.random_normal([n_hidden_dnn, n_hidden_dnn])),
'dnn_merge': tf.Variable(tf.random_normal([n_merge_dnn1, n_dnn6])),
'dnn6': tf.Variable(tf.random_normal([n_dnn6, 1]))
}
biases = {
'dnn1_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
'dnn2_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
'dnn3_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
'dnn4_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
'dnn5_b': tf.Variable(tf.random_normal([n_hidden_dnn])),
'dnn_merge_b': tf.Variable(tf.random_normal([n_dnn6])),
'dnn6_b': tf.Variable(tf.random_normal([1])),
}
def define_layers():
    # dnn layer 1
    dnn_layer1 = tf.add(tf.matmul(X, weights['dnn1']), biases['dnn1_b'])
    dnn_layer1 = tf.nn.relu(dnn_layer1)
    # dnn layer 2
    dnn_layer2 = tf.add(tf.matmul(dnn_layer1, weights['dnn2']), biases['dnn2_b'])
    dnn_layer2 = tf.nn.relu(dnn_layer2)
    # dnn layer 3
    dnn_layer3 = tf.add(tf.matmul(dnn_layer2, weights['dnn3']), biases['dnn3_b'])
    dnn_layer3 = tf.nn.relu(dnn_layer3)
    # dnn layer 4
    dnn_layer4 = tf.add(tf.matmul(dnn_layer3, weights['dnn4']), biases['dnn4_b'])
    dnn_layer4 = tf.nn.relu(dnn_layer4)
    # dnn layer 5
    dnn_layer5 = tf.add(tf.matmul(dnn_layer4, weights['dnn5']), biases['dnn5_b'])
    dnn_layer5 = tf.nn.relu(dnn_layer5)
    # merge layer
    merged = tf.concat([dnn_layer1, dnn_layer2, dnn_layer3, dnn_layer4, dnn_layer5], 1)
    dnn_merge = tf.add(tf.matmul(merged, weights['dnn_merge']), biases['dnn_merge_b'])
    dnn_merge = tf.nn.relu(dnn_merge)
    # dnn layer 6
    dnn_layer6 = tf.add(tf.matmul(dnn_merge, weights['dnn6']), biases['dnn6_b'])
    dnn_layer6 = tf.nn.sigmoid(dnn_layer6)
    return dnn_layer6
logits = define_layers()
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(hm_epochs):
        # epoch_loss = 0
        i = 0
        while i < len(X_train):
            start = i
            end = i + batch_size
            batch_x = np.array(X_train[start:end])
            batch_y = np.array(y_train[start:end])
            _, c, l = sess.run([optimizer, cost, logits], feed_dict={X: batch_x,
                                                                     y: batch_y})
            # epoch_loss += c
            i += batch_size
            # print(c, l)
        print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', c, 'logits:', l)
    save_path = saver.save(sess, "model/NF_dnn_and_rnn_model.ckpt")
    print("Model saved in file: %s" % save_path)
    X_test, y_test = load_data_from_file(TEST_FILE)
    predict = sess.run(logits, feed_dict={X: X_test})
    print(predict)
The output shows that my cost does not change:
Epoch 7 completed out of 10 loss: 1.27325 logits: [[ 1.]
Epoch 8 completed out of 10 loss: 1.27325 logits: [[ 1.]
Epoch 9 completed out of 10 loss: 1.27325 logits: [[ 1.]
You should remove the sigmoid activation from your last layer dnn_layer6 as sigmoid_cross_entropy_with_logits also applies sigmoid activation internally.
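A minimal sketch of that change (only the last layer of define_layers and the loss are shown; the sigmoid is then applied once, and only when probabilities are needed):

    # dnn layer 6: return raw logits, no sigmoid here
    dnn_layer6 = tf.add(tf.matmul(dnn_merge, weights['dnn6']), biases['dnn6_b'])
    return dnn_layer6

logits = define_layers()
# sigmoid_cross_entropy_with_logits applies the sigmoid internally
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y))
# apply sigmoid explicitly only for predictions
probabilities = tf.nn.sigmoid(logits)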

Tensorflow neural network has high error even on a really easy dataset

I'm trying to implement a one-hidden-layer NN for a regression problem. The loss improves for a few iterations, then it gets stuck at a really high error even on very easy data. Could someone help me find the bug? Here is my code:
import tensorflow as tf
import scipy.io as sio
import numpy as np
reuse_weights = 1
n_nodes_hl1 = 10
batch_size = 200
hm_epochs = 20
# load input from matlab
input_training = sio.loadmat('xMat.mat')
input_training = input_training['xMat']
input_test = sio.loadmat('xMat.mat')
input_test = input_test['xMat']
# find number of measurements and input length
n_measurements = input_training.shape[0]
input_length = input_training.shape[1]
# current input
data_y = input_training[:, input_length - 1].astype(float)
data_x = input_training[:, 0 : input_length - 1].astype(float)
test_data_y = input_test[:, input_length - 1].astype(float)
test_data_x = input_test[:, 0 : input_length - 1].astype(float)
x = tf.placeholder('float32',[None, input_length - 1])
y = tf.placeholder('float32')
# place holder for Dropout algorithm drop probability
keep_prob = tf.placeholder('float32')
def next_batch(data):
    """
    Return a total of `batch_size` samples from the array `data`.
    """
    if len(data.shape) == 2:
        idx = np.arange(0, len(data[:,0]))  # get all possible indexes
    else:
        idx = np.arange(0, len(data))  # get all possible indexes
    np.random.shuffle(idx)  # shuffle indexes
    idx = idx[0:batch_size]  # use only `batch_size` random indexes
    if len(data.shape) == 2:
        data_shuffle = [data[i,:] for i in idx]  # get list of `batch_size` random samples
    else:
        data_shuffle = [data[i] for i in idx]  # get list of `batch_size` random samples
    data_shuffle = np.asarray(data_shuffle)  # get back numpy array
    return data_shuffle

def neural_network_model(data, weights, biases, keep_prob):
    layer1 = tf.add(tf.matmul(data, weights['h1']), biases['b1'])
    layer1 = tf.nn.sigmoid(layer1)
    output = tf.add(tf.matmul(layer1, weights['out']), biases['out'])
    return output

if reuse_weights:
    weights = {
        'h1': tf.Variable(sio.loadmat('weights_h1.mat')['weights_h1'], name="weights_h1"),
        'out': tf.Variable(sio.loadmat('weights_out.mat')['weights_out'], name="weights_out")
    }
    biases = {
        'b1': tf.Variable(sio.loadmat('biases_b1.mat')['biases_b1'], name="biases_b1"),
        'out': tf.Variable(sio.loadmat('biases_out.mat')['biases_out'], name="biases_out")
    }
else:  # initialize weights
    weights = {
        'h1': tf.Variable(tf.random_normal([input_length - 1, n_nodes_hl1]), name="weights_h1"),
        'out': tf.Variable(tf.random_normal([n_nodes_hl1, 1]), name="weights_out")
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_nodes_hl1]), name="biases_b1"),
        'out': tf.Variable(tf.random_normal([1]), name="biases_out")
    }
def train_neural_network(x):
    prediction = neural_network_model(x, weights, biases, keep_prob)[:,0]
    cost = tf.reduce_mean(tf.abs(prediction - y))
    optimizer = tf.train.AdamOptimizer()
    opt = optimizer.minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(weights['h1'])
        for epoch in range(hm_epochs):  # training
            epoch_loss = 0
            for _ in range(int(n_measurements/batch_size)):
                _, c, p = sess.run([opt, cost, prediction], feed_dict={x: next_batch(data_x),
                                                                       y: next_batch(data_y), keep_prob: 1.0})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'Average loss:', epoch_loss/int(n_measurements/batch_size))
        # prediction
        accuracy = tf.reduce_mean(tf.abs(prediction - y))
        # Feed 1.0 for keep prob during testing
        print("Training data accuracy:", accuracy.eval({x: data_x, y: data_y, keep_prob: 1.0}))
        print("Training data predictions:", prediction.eval({x: data_x[0:5,:], keep_prob: 1.0}))
        print("Training data:", data_y[0:5])
        #print("Test data accuracy:", accuracy.eval({x: test_data_x, y: test_data_y, keep_prob: 1.0}))
        # save numpy arrays
        sio.savemat('weights_h1.mat', {'weights_h1': weights['h1'].eval()})
        sio.savemat('biases_b1.mat', {'biases_b1': biases['b1'].eval()})
        sio.savemat('weights_out.mat', {'weights_out': weights['out'].eval()})
        sio.savemat('biases_out.mat', {'biases_out': biases['out'].eval()})

train_neural_network(x)
Figured it out: the problem was with the data shuffling. The input and response were shuffled independently (next_batch is called once for the inputs and once for the targets, each with its own random shuffle), so the input data in each batch did not correspond to the response data.
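A minimal sketch of a batching helper along those lines (the name next_batch_pair is my own; the point is that one shared set of random indexes is applied to both arrays so inputs and responses stay aligned):

def next_batch_pair(data_x, data_y, batch_size):
    # draw one set of random indexes and use it for both arrays
    idx = np.random.choice(len(data_x), batch_size, replace=False)
    return data_x[idx, :], data_y[idx]

# usage inside the training loop:
# batch_x, batch_y = next_batch_pair(data_x, data_y, batch_size)
# _, c, p = sess.run([opt, cost, prediction],
#                    feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})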

How can this tensorflow model converge on a CPU but not on a GPU?

We ran into the strange problem that our relatively simple model converges on the CPU, but not on the server with a GPU. No modifications to the code are made between the two runs, nor does the code contain any explicit conditional statements that change the workflow on different architectures.
What could possibly be the reason?
In the likely event that the code is too long for you to read, we are still thankful for general speculations and hints.
#!/usr/bin/python
from __future__ import print_function
import tensorflow as tf
import os
import numpy as np
import input_data # copy from tensorflow/examples/tutorials/mnist/input_data.py
# wget https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/mnist/input_data.py if needed
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
force_gpu = False
debug = True # histogram_summary ...
# _cpu='/cpu:0'
default_learning_rate=0.001
tensorboard_logs = '/tmp/tensorboard-logs/'
# $(sleep 5; open http://0.0.0.0:6006) & tensorboard --debug --logdir=/tmp/tensorboard-logs/
class net():
def __init__(self,model,data,name=0,learning_rate=default_learning_rate,batch_size=64):
self.session=sess=session=tf.Session()
self.model=model
self.data=data # assigned to self.x=net.input via train
self.batch_size=batch_size
self.layers=[]
self.last_width=self.input_width(data)
self.learning_rate=learning_rate
self.generate_model(model)
def generate_model(self,model, name=''):
if not model: return self
with tf.name_scope('state'):
self.keep_prob = tf.placeholder(tf.float32) # 1 for testing! else 1 - dropout
self.train_phase = tf.placeholder(tf.bool, name='train_phase')
self.global_step = tf.Variable(0) # dont set, feed or increment global_step, tensorflow will do it automatically
with tf.name_scope('data'):
n_input=28*28
n_classes=10
self.x = x = self.input = tf.placeholder(tf.float32, [None, n_input])
self.last_layer=x
self.y = y = self.target = tf.placeholder(tf.float32, [None, n_classes])
if not force_gpu: tf.image_summary("mnist", tf.reshape(self.x, [-1, 28, 28, 1], "mnist_images"))
with tf.name_scope('model'):
model(self)
if(self.last_width!=n_classes): self.classifier() # 10 classes auto
def input_width(self,data):
return 28*28
def add(self, layer):
self.layers.append(layer)
self.last_layer = layer
self.last_shape = layer.get_shape()
def reshape(self,shape):
self.last_layer = tf.reshape(self.last_layer,shape)
self.last_shape = shape
self.last_width = shape[-1]
def batchnorm(self):
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
with tf.name_scope('batchnorm') as scope:
input = self.last_layer
train_op=batch_norm(input, is_training=True, center=False, updates_collections=None, scope=scope)
test_op=batch_norm(input, is_training=False, updates_collections=None, center=False,scope=scope, reuse=True)
self.add(tf.cond(self.train_phase,lambda:train_op,lambda:test_op))
# Fully connected layer
def dense(self, hidden=1024, depth=1, act=tf.nn.tanh, dropout=False, parent=-1): #
if parent==-1: parent=self.last_layer
shape = self.last_layer.get_shape()
if shape and len(shape)>2:
self.last_width= int(shape[1]*shape[2]*shape[3])
print("reshapeing ",shape,"to",self.last_width)
parent = tf.reshape(parent, [-1, self.last_width])
width = hidden
while depth>0:
with tf.name_scope('Dense_{:d}'.format(hidden)) as scope:
print("Dense ", self.last_width, width)
nr = len(self.layers)
# if self.last_width == width:
# M = closest_unitary(np.random.rand(self.last_width, width) / (self.last_width + width))
# weights = tf.Variable(m, name="weights_dense_" + str(nr))
# else:
weights = tf.Variable(tf.random_uniform([self.last_width, width], minval=-1. / width, maxval=1. / width), name="weights_dense")
bias = tf.Variable(tf.random_uniform([width],minval=-1./width,maxval=1./width), name="bias_dense")
dense1 = tf.matmul(parent, weights, name='dense_'+str(nr))+ bias
tf.histogram_summary('dense_'+str(nr),dense1)
tf.histogram_summary('weights_'+str(nr),weights)
tf.histogram_summary('bias_'+str(nr),bias)
tf.histogram_summary('dense_'+str(nr)+'/sparsity', tf.nn.zero_fraction(dense1))
tf.histogram_summary('weights_'+str(nr)+'/sparsity', tf.nn.zero_fraction(weights))
if act: dense1 = act(dense1)
# if norm: dense1 = self.norm(dense1,lsize=1) # SHAPE!
if dropout: dense1 = tf.nn.dropout(dense1, self.keep_prob)
self.layers.append(dense1)
self.last_layer = parent = dense1
self.last_width = width
depth=depth-1
self.last_shape=[-1,width] # dense
# Convolution Layer
def conv(self,shape,act=tf.nn.relu,pool=True,dropout=False,norm=True,name=None): # True why dropout bad in tensorflow??
with tf.name_scope('conv'):
print("input shape ",self.last_shape)
print("conv shape ",shape)
width=shape[-1]
filters=tf.Variable(tf.random_normal(shape))
# filters = tf.Variable(tf.random_uniform(shape, minval=-1. / width, maxval=1. / width), name="filters")
_bias=tf.Variable(tf.random_normal([shape[-1]]))
# # conv1 = conv2d('conv', _X, _weights, _bias)
conv1=tf.nn.bias_add(tf.nn.conv2d(self.last_layer,filter=filters, strides=[1, 1, 1, 1], padding='SAME'), _bias)
if debug: tf.histogram_summary('conv_' + str(len(self.layers)), conv1)
if act: conv1=act(conv1)
if pool: conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
if norm: conv1 = tf.nn.lrn(conv1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
if debug: tf.histogram_summary('norm_' + str(len(self.layers)), conv1)
if dropout: conv1 = tf.nn.dropout(conv1,self.keep_prob)
print("output shape ",conv1.get_shape())
self.add(conv1)
def classifier(self,classes=10): # Define loss and optimizer
with tf.name_scope('prediction'):# prediction
if self.last_width!=classes:
# print("Automatically adding dense prediction")
self.dense(hidden=classes, act= False, dropout = False)
# cross_entropy = -tf.reduce_sum(y_*y)
with tf.name_scope('classifier'):
y_=self.target
manual=False # True
if classes>100:
print("using sampled_softmax_loss")
y=prediction=self.last_layer
self.cost = tf.reduce_mean(tf.nn.sampled_softmax_loss(y, y_)) # for big vocab
elif manual:
# prediction = y =self.last_layer=tf.nn.softmax(self.last_layer)
# self.cost = cross_entropy = -tf.reduce_sum(y_ * tf.log(y+ 1e-10)) # against NaN!
prediction = y = tf.nn.log_softmax(self.last_layer)
self.cost = cross_entropy = -tf.reduce_sum(y_ * y)
else:
y = prediction = self.last_layer
self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_)) # prediction, target
# if not gpu:
tf.scalar_summary('cost', self.cost)
# self.cost = tf.Print(self.cost , [self.cost ], "debug cost : ")
learning_scheme=self.learning_rate
# learning_scheme=tf.train.exponential_decay(self.learning_rate, self.global_step, decay_steps, decay_size)
self.optimizer = tf.train.AdamOptimizer(learning_scheme).minimize(self.cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(self.target, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if not force_gpu: tf.scalar_summary('accuracy', self.accuracy)
# Launch the graph
def next_batch(self,batch_size=10):
return self.data.train.next_batch(batch_size)
def train(self,steps=-1,dropout=None,display_step=10,test_step=200): #epochs=-1,
steps = 9999999 if steps==-1 else steps
session=self.session
# with tf.device(_cpu):
# import tensorflow.contrib.layers as layers
# t = tf.verify_tensor_all_finite(t, msg)
tf.add_check_numerics_ops()
self.summaries = tf.merge_all_summaries()
self.summary_writer = tf.train.SummaryWriter(tensorboard_logs, session.graph) #
if not dropout:dropout=1. # keep all
x=self.x
y=self.y
keep_prob=self.keep_prob
session.run([tf.initialize_all_variables()])
step = 0 # show first
while step < steps:
# print("step %d \r" % step)# end=' ')
batch_xs, batch_ys = self.next_batch(self.batch_size)
# tf.train.shuffle_batch_join(example_list, batch_size, capacity=min_queue_size + batch_size * 16, min_queue_size)
# Fit training using batch data
feed_dict = {x: batch_xs, y: batch_ys, keep_prob: dropout, self.train_phase: True}
loss,_= session.run([self.cost,self.optimizer], feed_dict=feed_dict)
if step % test_step == 0: self.test(step)
if step % display_step == 0:
# Calculate batch accuracy, loss
feed = {x: batch_xs, y: batch_ys, keep_prob: 1., self.train_phase: False}
acc , summary = session.run([self.accuracy,self.summaries], feed_dict=feed)
# self.summary_writer.add_summary(summary, step) # only test summaries for smoother curve
print("\rStep {:d} Loss= {:.6f} Accuracy= {:.3f}".format(step,loss,acc),end=' ')
if str(loss)=="nan": return print("\nLoss gradiant explosion, exiting!!!") #restore!
step += 1
print("\nOptimization Finished!")
self.test(step,number=10000) # final test
def inputs(self,data):
self.inputs, self.labels = load_data()#...)
def test(self,step,number=400):#256
session=sess=self.session
run_metadata = tf.RunMetadata()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# Calculate accuracy for 256 mnist test images
test_labels = self.data.test.labels[:number]
test_images = self.data.test.images[:number]
feed_dict = {self.x: test_images, self.y: test_labels, self.keep_prob: 1., self.train_phase:False}
accuracy,summary= self.session.run([self.accuracy, self.summaries], feed_dict=feed_dict)
# accuracy,summary = session.run([self.accuracy, self.summaries], feed_dict, run_options, run_metadata)
print('\t'*3+"Test Accuracy:",accuracy)
# self.summary_writer.add_run_metadata(run_metadata, 'step #%03d' % step)
self.summary_writer.add_summary(summary,global_step=step)
def dense(net): # best with lr ~0.001
# type: (layer.net) -> None
# net.batchnorm() # start lower, else no effect
# net.dense(400,act=None)# # ~95% we can do better:
net.dense(400, act=tf.nn.tanh)# 0.996 YAY only 0.985 on full set, Step 5000 flat
return # 0.957% without any model!!
def alex(net):
# type: (layer.net) -> None
print("Building Alex-net")
net.reshape(shape=[-1, 28, 28, 1]) # Reshape input pictures
# net.batchnorm()
net.conv([3, 3, 1, 64])
net.conv([3, 3, 64, 128])
net.conv([3, 3, 128, 256])
net.dense(1024,act=tf.nn.relu)
net.dense(1024,act=tf.nn.relu)
# net=layer.net(dense,data=mnist, learning_rate=0.01 )#,'mnist' baseline
_net=net(alex,data=mnist, learning_rate=0.001)#,'mnist'
_net.train(50000,dropout=0.6,display_step=1,test_step=10)
In general, floating point computations can be a little nondeterministic when it comes to adding many numbers (and some GPUs are buggy). Did you try retuning the hyperparameters (varying learning rates and whatnot) to account for this?
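One way to check whether run-to-run randomness (rather than the device itself) is the culprit is to fix the random seeds on both machines; a minimal sketch for the older TF 1.x-style API used here (this reduces, but does not eliminate, GPU nondeterminism):

import numpy as np
import tensorflow as tf

np.random.seed(42)       # NumPy-level randomness (e.g. any manual shuffling)
tf.set_random_seed(42)   # graph-level seed for TensorFlow random ops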
I faced the same problem in the past. I solved it by following the tutorial at this link: https://www.tensorflow.org/install/pip#windows-native
Apparently, conda installation of TensorFlow is not recommended.
As an extra tip, if you're working with a GPU, avoid using TensorFlow 2.3. Apparently, it has some installation issues.
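As a general debugging aid when comparing CPU and GPU runs, logging device placement confirms which ops actually land on the GPU; a minimal sketch using the standard TF 1.x-style session config:

# print the device each op is assigned to when the session builds the graph
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))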

How to write a denoising autoencoder as an RNN with tensorflow

I want to adapt this recurrent neural network in TensorFlow (from this tutorial:
https://github.com/aymericdamien/TensorFlow-Examples/
and then the RNN program) so that it becomes a denoising autoencoder.
I have 5 time steps, and at each time step the noiseless target is sampled from sin(x), and the noisy input is sin(x) + Gaussian error.
Now my problem is that the RNN from the example gives me one output value per input sequence, but I want an output for each time step (I want 5 outputs, not 1).
How do I do this? I suspect it may be a matter of redefining the weights and biases, but how?
Here is the code. Many thanks for your help:
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
import numpy as np
# Parameters
learning_rate = 0.0005
training_iters = 1000
batch_size = 3
display_step = 100
# Network Parameters
n_input = 2
n_output = 2
n_steps = 5 # timesteps
n_hidden = 40 # hidden layer num of features
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_steps, n_input])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, n_output]))
}
biases = {
'out': tf.Variable(tf.random_normal([ n_output]))
}
# length of time series to be sampled
N = 1000000
dim_input = 2
x1 = np.zeros(N)
x2 = np.zeros(N)
y1 = np.zeros(N)
y2 = np.zeros(N)
# generate data
for i in range(0,N):
    # clean
    y1[i] = np.math.sin(i)
    y2[i] = np.math.cos(i)
    # noisy
    x1[i] = y1[i]+np.random.normal(loc=0.0, scale=0.05)
    x2[i] = y2[i]+np.random.normal(loc=0.0, scale=0.05)

def next_batch():
    batch = np.empty([batch_size,n_steps,dim_input])
    batch_y = np.empty([batch_size,n_steps,dim_input])
    # for plotting purposes only
    inits = np.empty([batch_size], dtype=int)
    for b in range(0,batch_size):
        # the first one of the batch
        inits[b] = int(np.round(np.random.uniform(low=0,high=N-n_steps-1)))
        init = inits[b]
        for i in range(0,n_steps):
            # noisy input
            batch[b,i,0] = x1[init + i]
            batch[b,i,1] = x2[init + i]
            # target (no noise)
            batch_y[b,i,0] = y1[init+i]
            batch_y[b,i,1] = y2[init+i]
    return(batch,batch_y,inits)
def RNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
# Define loss and optimizer
# SSE, there must be an easier way to do this
def get_cost(prediction,truth):
    z = 0
    for i in range(0,batch_size):
        z = z + np.square(np.add(prediction[i,:], np.multiply(-1,truth[i,:])))
    z = np.add(z[0],z[1])
    z = np.sum(z)
    return(z)

cost = get_cost(pred,y)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
accuracy = cost
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        print('step '+ str(step))
        batch_x, batch_y, inits = next_batch()
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print(loss)
        step += 1
    print("Optimization Finished!")
If I run this, I get this error message:
ValueError: Shape (?, 5, 2) must have rank 2. This seems fair enough, because the target is 5 steps long, and the output only 1. But how do I fix that?
Many thanks.
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
import numpy as np
import matplotlib.pyplot as plt
## Denoising autoencoder.
import numpy as np
count = 0
# length of time series to be sampled
N = 10000
x1 = np.zeros(N)
x2 = np.zeros(N)
y1 = np.zeros(N)
y2 = np.zeros(N)
batch_size = 30
learning_rate = 0.0005
training_iters = 300000
display_step = 100
# Network Parameters
n_input = 2
n_output = 2
n_steps = 15 # timesteps
n_hidden = 75 # hidden layer num of
# generate data
for i in range(0,N):
# clean
y1[i] = np.math.sin(i)
y2[i] = np.math.cos(i)
# noisy
x1[i] = y1[i]+np.random.normal(loc=0.0, scale=0.1)
x2[i] = y2[i]+np.random.normal(loc=0.0, scale=0.1)
def next_batch():
batch = np.empty([batch_size,n_steps,n_input])
batch_y = np.empty([batch_size,n_steps,n_input])
# for plotting purposes only
inits = np.empty([batch_size], dtype=int)
for b in range(0,batch_size):
# the first one of the batch
inits[b] = int(np.round(np.random.uniform(low=0,high=N-n_steps-1)))
init = inits[b]
for i in range(0,n_steps):
# noisy input
batch[b,i,0] = x1[init + i]
batch[b,i,1] = x2[init + i]
# target (no noise)"
batch_y[b,i,0] = y1[init+i]
batch_y[b,i,1] = y2[init+i]
return(batch,batch_y,inits)
# Parameters
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_steps, n_output])
N_train = N - 500
def RNN(x):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
# Permuting batch_size and n_steps
x = tf.transpose(x, [1, 0, 2])
# Reshaping to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.split(0, n_steps, x)
# Define a lstm cell with tensorflow
lstm_cell = rnn_cell.LSTMCell(num_units = n_hidden, forget_bias=1.0, num_proj=2)
# Get lstm cell output
outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
return outputs
print(x)
pred = RNN(x)
# Define loss and optimizer
def get_cost(prediction,truth):
#print('pred' + str(prediction))
# SSE. there must be an easier way than this:
z = 0
for step in range(0,n_steps):
for b in range(0,batch_size):
for y_dim in range(0,2):
d1 = prediction[step][b,y_dim]
d2 = truth[b,step,y_dim]
diff= (d1 - d2 )
z = z + diff * diff
return(z)
cost = get_cost(pred,y)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
#print('step '+ str(step))
batch_x, batch_y, inits = next_batch()
# Reshape data to get 28 seq of 28 elements
#batch_x = batch_x.reshape((batch_size, n_steps, n_input))
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
if step % display_step == 0:
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
print(str(step) + ':' + str(loss))
step += 1
print("Optimization Finished!")
batch_size = 1
test_data, test_label, inits = next_batch()
#print "Testing Accuracy:", \
#sess.run(accuracy, feed_dict={x: test_data, y: test_label})
p2 = sess.run(pred, feed_dict={x: test_data, y: test_label})
#print('---inits---')
#print(inits)
print('---batch---')
print(test_data)
print('---truth---')
print(test_label)
print('---pred---')
print(p2)
c_final = get_cost(p2, test_label)
print(c_final)
First, we generate some data: a 2-dimensional series of sin(i) and cos(i), with i running from 0 to N-1. This gives us the variable y. Then we add some Gaussian noise to this series, and that's x. We then train a recurrent neural net to produce the clean output from the noisy input. In other words, we train the net so that it outputs [cos(i), sin(i)] given the input [cos(i)+e1, sin(i)+e2]. This is a plain vanilla denoising autoencoder, except that the data has a time element. Now you can feed new data into the neural net, and it will hopefully remove the noise.
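As a side note, the Python triple loop in get_cost can be replaced by a single TensorFlow expression over the stacked per-step outputs; a minimal sketch under the same shapes (outputs is the list of n_steps tensors of shape [batch_size, n_output] returned by RNN, and tf.pack is the old name of tf.stack in the TensorFlow version used above):

def get_cost_vectorized(outputs, truth):
    # outputs: list of n_steps tensors, each of shape [batch_size, n_output]
    # truth:   tensor of shape [batch_size, n_steps, n_output]
    pred = tf.transpose(tf.pack(outputs), [1, 0, 2])  # -> [batch_size, n_steps, n_output]
    return tf.reduce_sum(tf.square(pred - truth))     # same sum of squared errors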