I would like to achieve something similar:
https://rootpy.github.io/root_numpy/_images/plot_multiclass_1.png
What would be the most elegant solution? Should I get the weights, bias, function, and data and plot them with some other tool, or does TensorFlow have support for that?
As far as I know, TensorFlow does not directly support plotting decision boundaries.
It is certainly not the most elegant solution, but you can create a grid, classify each point of the grid, and then plot it. For example:
#!/usr/bin/env python

"""
Solve the XOR problem with Tensorflow.

The XOR problem is a two-class classification problem. You only have four
datapoints, all of which are given during training time. Each datapoint has
two features:

    x o
    o x

As you can see, the classifier has to learn a non-linear transformation of
the features to find a proper decision boundary.
"""

__author__ = "Martin Thoma"

import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# The training data
XOR_X = [[0, 0], [0, 1], [1, 0], [1, 1]]  # Features
# XOR_Y = [[0], [1], [1], [0]]  # Class labels (unused; overwritten below)
XOR_Y = [[1, 0], [0, 1], [0, 1], [1, 0]]  # Target values (one-hot)

assert len(XOR_X) == len(XOR_Y)  # sanity check

# The network
nb_classes = 2
input_ = tf.placeholder(tf.float32,
                        shape=[None, len(XOR_X[0])],
                        name="input")
target = tf.placeholder(tf.float32,
                        shape=[None, nb_classes],
                        name="output")
nb_hidden_nodes = 2
# enc = tf.one_hot([0, 1], 2)
w1 = tf.Variable(tf.random_uniform([2, nb_hidden_nodes], -1, 1),
                 name="Weights1")
w2 = tf.Variable(tf.random_uniform([nb_hidden_nodes, nb_classes], -1, 1),
                 name="Weights2")
b1 = tf.Variable(tf.zeros([nb_hidden_nodes]), name="Biases1")
b2 = tf.Variable(tf.zeros([nb_classes]), name="Biases2")
activation2 = tf.sigmoid(tf.matmul(input_, w1) + b1)
hypothesis = tf.nn.softmax(tf.matmul(activation2, w2) + b2)
cross_entropy = -tf.reduce_sum(target * tf.log(hypothesis))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)

# Start training
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    for i in range(100000):
        sess.run(train_step, feed_dict={input_: XOR_X, target: XOR_Y})
        if i % 10000 == 0:
            print('Epoch ', i)
            print('Hypothesis ', sess.run(hypothesis,
                                          feed_dict={input_: XOR_X,
                                                     target: XOR_Y}))
            print('w1 ', sess.run(w1))
            print('b1 ', sess.run(b1))
            print('w2 ', sess.run(w2))
            print('b2 ', sess.run(b2))
            print('cost (ce)', sess.run(cross_entropy,
                                        feed_dict={input_: XOR_X,
                                                   target: XOR_Y}))

    # Visualize classification boundary
    xs = np.linspace(-5, 5)
    ys = np.linspace(-5, 5)
    pred_classes = []
    for x in xs:
        for y in ys:
            pred_class = sess.run(hypothesis,
                                  feed_dict={input_: [[x, y]]})
            pred_classes.append((x, y, pred_class.argmax()))
    xs_p, ys_p = [], []
    xs_n, ys_n = [], []
    for x, y, c in pred_classes:
        if c == 0:
            xs_n.append(x)
            ys_n.append(y)
        else:
            xs_p.append(x)
            ys_p.append(y)
    plt.plot(xs_p, ys_p, 'ro', xs_n, ys_n, 'bo')
    plt.show()
which gives a plot of the classified grid (red points for one class, blue for the other).
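As a side note, the grid evaluation above makes one sess.run call per point; it can be batched into a single call. A minimal sketch, assuming the same input_ and hypothesis tensors and an open session as above:

xx, yy = np.meshgrid(np.linspace(-5, 5), np.linspace(-5, 5))
grid = np.c_[xx.ravel(), yy.ravel()]  # one (x, y) row per grid point
preds = sess.run(hypothesis, feed_dict={input_: grid}).argmax(axis=1)
plt.scatter(grid[:, 0], grid[:, 1], c=np.where(preds == 0, 'b', 'r'), marker='o')
plt.show()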
Related
I am trying to manually implement a very simple RNN using TensorFlow 2. I modeled my code on the example on the TensorFlow website for building models manually. The code, stripped to its bare essentials for this purpose, is:
class ModelSimple(object):
    def __init__(self):
        # Initialize the weight and the bias to random values
        self.W = tf.Variable(tf.random.normal([]))
        self.b = tf.Variable(tf.random.normal([]))

    def __call__(self, x):
        return self.W * x + self.b


def loss(predicted_y, target_y):
    return tf.reduce_mean(tf.square(predicted_y - target_y))


NUM_EXAMPLES = 1000
inputs = tf.random.normal(shape=[NUM_EXAMPLES])
outputs = tf.zeros(NUM_EXAMPLES)
model = ModelSimple()
with tf.GradientTape() as t:
    t.watch([model.W, model.b])
    current_loss = loss(model(inputs), outputs)
dW, db = t.gradient(current_loss, [model.W, model.b])
print(dW, db)
This gives nice tensors for dW and db. Then I try to do what I described above
class ModelRNN(object):
    def __init__(self, n_inputs, n_neurons):
        self.n_inputs = n_inputs
        self.n_neurons = n_neurons
        # weights for the new input
        self.Wx = tf.Variable(tf.random.normal(shape=[self.n_inputs, self.n_neurons], dtype=tf.float32))
        # weights for the previous output
        self.Wy = tf.Variable(tf.random.normal(shape=[self.n_neurons, self.n_neurons], dtype=tf.float32))
        # bias weights
        self.b = tf.Variable(tf.zeros([1, self.n_neurons], dtype=tf.float32))

    def __call__(self, X_batch):
        # get the shape of the input
        batch_size, num_time_steps, _ = X_batch.get_shape()
        # We loop through the time steps, and the output of the previous
        # computation feeds into the next one. This variable keeps track
        # of it and is initialized to zero.
        y_last = tf.Variable(tf.zeros([batch_size, self.n_neurons], dtype=tf.float32))
        # the outputs will be stored in this tensor
        Ys = tf.Variable(tf.zeros([batch_size, num_time_steps, self.n_neurons], dtype=tf.float32))
        for t in range(num_time_steps):
            Xt = X_batch[:, t, :]
            yt = tf.tanh(tf.matmul(y_last, self.Wy) +
                         tf.matmul(Xt, self.Wx) +
                         self.b)
            y_last.assign(yt)
            Ys[:, t, :].assign(yt)
        return Ys


inputs = tf.convert_to_tensor(np.array([
    # t = 0      t = 1
    [[0, 1, 2], [9, 8, 7]],  # instance 1
    [[3, 4, 5], [0, 0, 0]],  # instance 2
    [[6, 7, 8], [6, 5, 4]],  # instance 3
    [[9, 0, 1], [3, 2, 1]],  # instance 4
], dtype=np.float32))
outputs = tf.Variable(tf.zeros((4, 2, 5), dtype=np.float32))

model = ModelRNN(3, 5)
with tf.GradientTape() as t:
    t.watch([model.Wx, model.Wy, model.b])
    current_loss = loss(model(inputs), outputs)
dWx, dWy, db = t.gradient(current_loss, [model.Wx, model.Wy, model.b])
print(dWx, dWy, db)
and it turns out dWx, dWy, and db are all None. I have tried several things (including watching them with the GradientTape despite them being variables), and yet I keep getting None. What am I doing wrong?
It looks like this is related to this issue:
Tensorflow cannot get gradient wrt a Variable, but can wrt a Tensor
The tape cannot differentiate through Variable.assign, so replacing the assigns with a Python list and tf.stack keeps the ops on the tape and results in a gradient being returned:
Ys = []
for t in range(num_time_steps):
    Xt = X_batch[:, t, :]
    yt = tf.tanh(tf.matmul(y_last, self.Wy) +
                 tf.matmul(Xt, self.Wx) +
                 self.b)
    y_last.assign(yt)
    Ys.append(yt)
return tf.stack(Ys, axis=1)
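One caveat: since gradients do not flow through Variable.assign, the y_last.assign(yt) call above still cuts backpropagation across time steps (the gradient reaches each yt only directly through Ys). If full backpropagation through time is needed, a sketch that keeps y_last as a plain tensor instead of a Variable:

y_last = tf.zeros([batch_size, self.n_neurons], dtype=tf.float32)
Ys = []
for t in range(num_time_steps):
    Xt = X_batch[:, t, :]
    yt = tf.tanh(tf.matmul(y_last, self.Wy) +
                 tf.matmul(Xt, self.Wx) +
                 self.b)
    y_last = yt  # ordinary tensor, so the tape can trace through time
    Ys.append(yt)
return tf.stack(Ys, axis=1)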
Based on this post, I tried to create another model, where I'm adding both categorical and continuous variables.
Please find the code below:
from __future__ import print_function

import pandas as pd
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import LabelEncoder

if __name__ == '__main__':
    # 1 categorical input feature, 1 continuous feature and a binary output
    df = pd.DataFrame({'cat2': np.array(['o', 'm', 'm', 'c', 'c', 'c', 'o', 'm', 'm', 'm']),
                       'num1': np.random.rand(10),
                       'label': np.array([0, 0, 1, 1, 0, 0, 1, 0, 1, 1])})

    encoder = LabelEncoder()
    encoder.fit(df.cat2.values)
    X1 = encoder.transform(df.cat2.values).reshape(-1, 1)
    X2 = np.array(df.num1.values).reshape(-1, 1)
    # X = np.concatenate((X1, X2), axis=1)

    Y = np.zeros((len(df), 2))
    Y[np.arange(len(df)), df.label.values] = 1

    # Neural net parameters
    training_epochs = 5
    learning_rate = 1e-3
    cardinality = len(np.unique(X1))
    embedding_size = 2
    input_X_size = 1
    n_labels = len(np.unique(Y))
    n_hidden = 10

    # Placeholders for input, output
    cat2 = tf.placeholder(tf.int32, [None], name='cat2')
    x = tf.placeholder(tf.float32, [None, 1], name="input_x")
    y = tf.placeholder(tf.float32, [None, 2], name="input_y")

    embed_matrix = tf.Variable(
        tf.random_uniform([cardinality, embedding_size], -1.0, 1.0),
        name="embed_matrix"
    )
    embed = tf.nn.embedding_lookup(embed_matrix, cat2)
    inputs_with_embed = tf.concat([x, embedding_aggregated], axis=2, name="inputs_with_embed")

    # Neural network weights
    h = tf.get_variable(name='h2', shape=[inputs_with_embed, n_hidden],
                        initializer=tf.contrib.layers.xavier_initializer())
    W_out = tf.get_variable(name='out_w', shape=[n_hidden, n_labels],
                            initializer=tf.contrib.layers.xavier_initializer())

    # Neural network operations
    # embedded_chars = tf.nn.embedding_lookup(embeddings, x)
    layer_1 = tf.matmul(inputs_with_embed, h)
    layer_1 = tf.nn.relu(layer_1)
    out_layer = tf.matmul(layer_1, W_out)

    # Define loss and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(training_epochs):
            avg_cost = 0.
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: X2, cat2: X1, y: Y})
        print("Optimization Finished!")
But I'm getting the following error. It seems I'm not concatenating the continuous variable and the embedding properly, and I don't understand how to fix it. Could someone please guide me?
ValueError: Shape must be at least rank 3 but is rank 2 for 'inputs_with_embed_2' (op: 'ConcatV2') with input shapes: [?,1], [?,2], [] and with computed input tensors: input[2] = <2>.
Thanks!
If by embedding_aggregated you mean embed (probably a typo):
The error is that there is no axis=2 in your case; it should be axis=1:
inputs_with_embed = tf.concat([x, embed], axis=1, name="inputs_with_embed")
embed has shape [None, embedding_size] and x has shape [None, 1].
They are both 2D tensors, so the only valid axes are 0 and 1 (indexing starts at 0). Therefore, to get inputs_with_embed with shape [None, embedding_size + 1], you need to concatenate on axis=1.
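A quick shape check makes this concrete (a sketch, assuming the placeholders defined in the question with embedding_size = 2):

print(x.get_shape())      # (?, 1)
print(embed.get_shape())  # (?, 2)
inputs_with_embed = tf.concat([x, embed], axis=1, name="inputs_with_embed")
print(inputs_with_embed.get_shape())  # (?, 3)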
I am trying to implement a TensorFlow LSTM regression model for a list of input numbers.
example:
input_data = [1, 2, 3, 4, 5]
time_steps = 2
-> X == [[1, 2], [2, 3], [3, 4]]
-> y == [3, 4, 5]
The code is below:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import mean_squared_error

TIMESTEPS = 20
num_hidden = 20

Xd, yd = load_data()
train_input = Xd['train']
train_input = train_input.reshape(-1, 20, 1)
train_output = yd['train']
# train_input  = [[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],..
# train_output = [[21],[22],[23]....
test_input = Xd['test']
test_output = yd['test']

X = tf.placeholder(tf.float32, [None, 20, 1])
y = tf.placeholder(tf.float32, [None, 1])

cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
val, state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
val = tf.Print(val, [tf.argmax(val, 1)], 'argmax(val)=', summarize=20, first_n=7)
val = tf.transpose(val, [1, 0, 2])
val = tf.Print(val, [tf.argmax(val, 1)], 'argmax(val2)=', summarize=20, first_n=7)

# Take only the last output after 20 time steps
last = tf.gather(val, int(val.get_shape()[0]) - 1)
last = tf.Print(last, [tf.argmax(last, 1)], 'argmax(val3)=', summarize=20, first_n=7)

# define variables for weights and bias
weight = tf.Variable(tf.truncated_normal([num_hidden, int(y.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[y.get_shape()[1]]))

# Prediction is a matmul of the last output with the weights, plus the bias
prediction = tf.matmul(last, weight) + bias

# Cost function using softmax
# y is the true distribution and prediction is the predicted one
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), reduction_indices=[1]))
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cost)

from tensorflow.python import debug as tf_debug
inita = tf.initialize_all_variables()
sess = tf.Session()
sess.run(inita)

batch_size = 100
no_of_batches = int(len(train_input) / batch_size)
epoch = 10
test_size = 100
for i in range(epoch):
    for start, end in zip(range(0, len(train_input), batch_size),
                          range(batch_size, len(train_input) + 1, batch_size)):
        sess.run(minimize, feed_dict={X: train_input[start:end], y: train_output[start:end]})

    test_indices = np.arange(len(test_input))  # Get a test batch
    np.random.shuffle(test_indices)
    test_indices = test_indices[0:test_size]
    print(i, mean_squared_error(np.argmax(test_output[test_indices], axis=1),
                                sess.run(prediction, feed_dict={X: test_input[test_indices]})))

print("predictions", prediction.eval(feed_dict={X: train_input}, session=sess))
y_pred = prediction.eval(feed_dict={X: test_input}, session=sess)
sess.close()

test_size = test_output.shape[0]
ax = np.arange(0, test_size, 1)
plt.plot(ax, test_output, 'r', ax, y_pred, 'b')
plt.show()
But I am not able to minimize the cost; the calculated MSE increases at each step instead of decreasing.
I suspect there is a problem with the cost function that I am using.
Any thoughts or suggestions as to what I am doing wrong?
Thanks
As mentioned in the comments, you need to change your loss function to an MSE loss and reduce your learning rate. Is your error converging to zero now?
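For reference, a minimal sketch of that change against the graph above (the 1e-4 learning rate is an assumption; tune it for your data):

# replace the softmax/log cost with a mean squared error loss
cost = tf.reduce_mean(tf.square(prediction - y))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
minimize = optimizer.minimize(cost)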
I have a set of 200 images: 100 identical squares and 100 identical circles. The images are 44x41 pixels, in grayscale. I am trying to build a simple classifier to learn TensorFlow.
The problem: the prediction vectors always have the same value regardless of the input image.
Here's the code of my neural net:
import tensorflow as tf
import random as r
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
%matplotlib inline

# create pictures
for i in range(100):
    fig1 = plt.figure(frameon=False, figsize=(1, 1), dpi=32)
    ax1 = fig1.add_subplot(111, aspect='equal')
    posx = 0.25
    posy = 0.25
    ax1.add_patch(
        patches.Rectangle(
            (posx, posy),  # (x, y)
            0.5,           # width
            0.5,           # height
        )
    )
    ax1.axis('off')
    fig1.savefig('rect' + str(i) + '.png', bbox_inches='tight')

for i in range(100):
    fig1 = plt.figure(frameon=False, figsize=(1, 1), dpi=32)
    ax1 = fig1.add_subplot(111, aspect='equal')
    posx = 0.5
    posy = 0.5
    ax1.add_patch(
        patches.Circle(
            (posx, posy),  # (x, y)
            0.3,           # radius
        )
    )
    ax1.axis('off')
    fig1.savefig('circ' + str(i) + '.png', bbox_inches='tight')

# create vectors
train_features = np.zeros((200, 44, 41, 1))
train_labels = np.zeros((200, 2))
for i in range(100):
    # get rect
    im = Image.open("rect" + str(i) + ".png")
    im = im.convert(mode="L")
    xxx = list(im.getdata())
    imdata = np.reshape(xxx, (44, 41, 1))
    train_features[i] = imdata
    train_labels[i] = np.array([0, 1])
    # get circle
    im = Image.open("circ" + str(i) + ".png")
    im = im.convert(mode="L")
    xxx = list(im.getdata())
    imdata = np.reshape(xxx, (44, 41, 1))
    train_features[i + 100] = imdata
    train_labels[i + 100] = np.array([1, 0])

tf.reset_default_graph()

features = tf.placeholder(tf.float32, shape=[None, 44, 41, 1])
labels = tf.placeholder(tf.float32, shape=[None, 2])

weights = tf.Variable(tf.truncated_normal([3, 3, 1, 16], stddev=0.1))
biases = tf.Variable(tf.zeros(16))
weights2 = tf.Variable(tf.truncated_normal([3, 3, 16, 64], stddev=0.1))
biases2 = tf.Variable(tf.zeros(64))

conv_layer = tf.nn.conv2d(features, weights, strides=[1, 1, 1, 1], padding='SAME')
conv_layer_b = tf.nn.bias_add(conv_layer, biases)
conv_layer_relu = tf.nn.relu(conv_layer_b)
conv_layer_pool = tf.nn.max_pool(conv_layer_relu, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')

conv_layer2 = tf.nn.conv2d(conv_layer_pool, weights2, strides=[1, 1, 1, 1], padding='SAME')
conv_layer2_b = tf.nn.bias_add(conv_layer2, biases2)
conv_layer2_relu = tf.nn.relu(conv_layer2_b)
conv_layer2_pool = tf.nn.max_pool(conv_layer2_relu, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')

# fully connected layer
weights_fc = tf.Variable(tf.truncated_normal([44 * 41 * 64, 256], stddev=0.1))
biases_fc = tf.Variable(tf.zeros([256]))
fc = tf.reshape(conv_layer2_pool, [-1, weights_fc.get_shape().as_list()[0]])
fc_logit = tf.add(tf.matmul(fc, weights_fc), biases_fc)
fc_relu = tf.nn.relu(fc_logit)
# fc_drop = tf.nn.dropout(fc_relu, 0.75)

# final layer
weights_out = tf.Variable(tf.truncated_normal([256, 2], stddev=0.1))
biases_out = tf.Variable(tf.zeros([2]))
out = tf.add(tf.matmul(fc_relu, weights_out), biases_out)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=labels))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(optimizer, feed_dict={
            features: train_features[:],
            labels: train_labels[:]})
    for i in range(200):
        outx = sess.run(out, feed_dict={
            features: [train_features[i]],
            labels: [train_labels[i]]})
        print(outx)
        print(train_labels[i])
        print('---')
Try not to give the same name to two tensors. For example, you originally had a single conv_layer name that was equal to tf.nn.conv2d(features, weights, strides=[1, 1, 1, 1], padding='SAME'), then rewritten to tf.nn.bias_add(conv_layer, biases), then rewritten once more to another shape, and so on. Use naming like this instead:
conv_layer = tf.nn.conv2d(features, weights, strides=[1, 1, 1, 1], padding='SAME')
conv_layer_b = tf.nn.bias_add(conv_layer, biases)
conv_layer_relu = tf.nn.relu(conv_layer_b)
conv_layer_pool = tf.nn.max_pool(conv_layer_relu, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')
The algorithm learns one image at a time. Try to feed all the images in your set at once if your machine can handle it: sess.run(optimizer, feed_dict={features: train_features[:], labels: train_labels[:]}); if not, feed 100 images from both classes. Are the images shuffled, or do 100 circles come first and then 100 squares? The error can lie here: you would update your weights 100 times with only squares in the last loop. A minimal shuffle is sketched below.
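A minimal shuffle sketch (using numpy and the train_features / train_labels arrays defined in the question):

perm = np.random.permutation(len(train_features))  # random order over all 200 images
train_features = train_features[perm]
train_labels = train_labels[perm]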
Can I see the complete program, with the part where you print the predicted vector? As a first step I would take the dropout out and let it overfit. Then, maybe, use a smaller fc layer (512 or 256) and a smaller learning rate (0.01). I also prefer tf.get_variable('w1', shape=[3, 3, 1, 16]) over tf.Variable(...), and I would initialize the biases with value 0.1.
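For illustration, that preference might look like this (a sketch; the initializer choices are assumptions made to mirror the stddev=0.1 weights and 0.1 biases suggested above):

weights = tf.get_variable('w1', shape=[3, 3, 1, 16],
                          initializer=tf.truncated_normal_initializer(stddev=0.1))
biases = tf.get_variable('b1', shape=[16],
                         initializer=tf.constant_initializer(0.1))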
I would like to know how to make TensorFlow update only specific matrix elements. The following code is from the TensorFlow tutorials (https://www.tensorflow.org/versions/r0.11/tutorials/pdes/index.html#partial-differential-equations).
# Import libraries for simulation
import tensorflow as tf
import numpy as np

# Imports for visualization
import PIL.Image

def DisplayArray(a, fmt='jpeg', rng=[0, 1]):
    """Display an array as a picture."""
    a = (a - rng[0]) / float(rng[1] - rng[0]) * 255
    a = np.uint8(np.clip(a, 0, 255))
    with open("fig/image.jpg", "w") as f:
        PIL.Image.fromarray(a).save(f, "jpeg")

# sess = tf.Session()
sess = tf.InteractiveSession()

# Computational convenience functions
def make_kernel(a):
    """Transform a 2D array into a convolution kernel"""
    a = np.asarray(a)
    a = a.reshape(list(a.shape) + [1, 1])
    return tf.constant(a, dtype=1)

def simple_conv(x, k):
    """A simplified 2D convolution operation"""
    x = tf.expand_dims(tf.expand_dims(x, 0), -1)
    y = tf.nn.depthwise_conv2d(x, k, [1, 1, 1, 1], padding='SAME')
    return y[0, :, :, 0]

def laplace(x):
    """Compute the 2D laplacian of an array"""
    laplace_k = make_kernel([[0.5, 1.0, 0.5],
                             [1.0, -6., 1.0],
                             [0.5, 1.0, 0.5]])
    return simple_conv(x, laplace_k)

# Define the PDE
N = 500

# Initial conditions -- some rain drops hit a pond

# Set everything to zero
u_init = np.zeros([N, N], dtype=np.float32)
ut_init = np.zeros([N, N], dtype=np.float32)

# Some rain drops hit a pond at random points
for n in range(40):
    a, b = np.random.randint(0, N, 2)
    u_init[a, b] = np.random.uniform()

DisplayArray(u_init, rng=[-0.1, 0.1])

# Parameters:
# eps -- time resolution
# damping -- wave damping
eps = tf.placeholder(tf.float32, shape=())
damping = tf.placeholder(tf.float32, shape=())

# Create variables for simulation state
U = tf.Variable(u_init)
Ut = tf.Variable(ut_init)

# Discretized PDE update rules
U_ = U + eps * Ut
Ut_ = Ut + eps * (laplace(U) - damping * Ut)

# Operation to update the state
step = tf.group(
    U.assign(U_),
    Ut.assign(Ut_))

# Initialize state to initial conditions
tf.initialize_all_variables().run()

# Run 1000 steps of PDE
for i in range(1000):
    # Step simulation
    step.run({eps: 0.03, damping: 0.04})
    DisplayArray(U.eval(), rng=[-0.1, 0.1])
In step = tf.group(U.assign(U_), Ut.assign(Ut_)), I would like to know if it is possible to update only the values within U_[1:-1, 1:-1] and Ut_[1:-1, 1:-1], and keep the remaining values constant.
Thank you very much!
You can perform sliced assignment in TensorFlow. Try something like this:
assign_op = U[1:-1, 1:-1].assign(U_[1:-1, 1:-1])
(The exact indices are up to you.)
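To keep the boundary values fixed in the simulation above, both updates can be sliced and grouped into the step op. A minimal sketch, assuming the same U, Ut, U_ and Ut_ as in the question:

# only the interior is assigned; the boundary rows/columns stay constant
step = tf.group(
    U[1:-1, 1:-1].assign(U_[1:-1, 1:-1]),
    Ut[1:-1, 1:-1].assign(Ut_[1:-1, 1:-1]))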