I am reading the tests in the TensorFlow MNIST official model. Line 49 has:
self.assertEqual(loss.shape, ())
and selected lines leading up to it are:
BATCH_SIZE = 100
def dummy_input_fn():
  image = tf.random_uniform([BATCH_SIZE, 784])
  labels = tf.random_uniform([BATCH_SIZE, 1], maxval=9, dtype=tf.int32)
  return image, labels

def make_estimator():
  return tf.estimator.Estimator(
      model_fn=mnist.model_fn, params={
          'data_format': 'channels_last'
      })
class Tests(tf.test.TestCase):
  """Run tests for MNIST model."""

  def test_mnist(self):
    classifier = make_estimator()
    classifier.train(input_fn=dummy_input_fn, steps=2)
    # (omitted lines presumably call classifier.evaluate() to produce eval_results)
    loss = eval_results['loss']
    self.assertEqual(loss.shape, ())
but the TensorFlow documentation suggests that a shape is an array of numbers:
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
These two statements that print the shape of the object don't help much:
print(loss.shape)
# prints `()`
print(tf.shape(loss))
# prints `Tensor("Shape:0", shape=(0,), dtype=int32)`
What is the meaning of a () shape?
Your loss is a NumPy object and not a TensorFlow object:
print(type(loss))
# prints <class 'numpy.float32'>
print(loss)
# prints 2.2745261
I assume that a shape of () in NumPy means a scalar (a 0-dimensional array), though I could not find documentation for it. You can see the list of object attributes (fields and methods) with:
print(dir(loss))
# prints `['T', '__abs__', '__add__', '__and__',
# ... 'shape', 'size', 'sort', ... ]`
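To confirm, here is a small standalone check (plain NumPy, no TensorFlow needed) showing that a 0-dimensional value reports an empty shape tuple:

import numpy as np

loss = np.float32(2.2745261)   # same type as the loss value above
print(type(loss))              # <class 'numpy.float32'>
print(loss.shape)              # () -- a scalar has zero dimensions
print(np.ndim(loss))           # 0
print(np.array(5).shape)       # () -- a 0-d NumPy array also has an empty shape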
Related
I have a dataset which contains many snapshot observations in time, with a 1 or 0 as the label for each observation. Let's say each observation contains 3 features. I want to train an LSTM which will take a sequence of n observations and attempt to classify the nth observation as a 1 or 0.
So if we have a dataset that looks like this:
# X = [[0, 1, 1], [1, 0, 0], [1, 1, 1], [1, 1, 0]]
# y = [1, 0, 1, 0]
# so X[0] is labelled y[0], X[1] is labelled y[1]
# and I would like to input X[0] + X[1] to classify X[1] as y[1]
# How would I need to structure this below?
X = [[0, 1, 1], [1, 0, 0], [1, 1, 1], [1, 1, 0]]
y = [1, 0, 1, 0]
def create_model():
    model = Sequential()
    # input_shape[0] is equal to 2 timesteps?
    # input_shape[1] is equal to the 3 features per row?
    model.add(LSTM(20, input_shape=(2, 3)))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model

m = create_model()
m.fit(X, y)
So I want X[0] and X[1] to be the input for one iteration of training, and that pair should be classified as y[1].
My question is this. How do I structure the model in order to take this input properly? I am very confused by input_shape, features, input_length, batches etc ...
The below code snippet might help clarify:
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
# Number of samples = 4, sequence length = 3, features = 2
X = np.array([[[0, 1], [1, 0], [1, 1]],
              [[1, 1], [1, 1], [1, 0]],
              [[0, 1], [1, 0], [0, 0]],
              [[1, 1], [1, 1], [1, 1]]])
y = np.array([[1], [0], [1], [0]])
print(X)
print(X.shape)
print(y.shape)
model = Sequential()
model.add(LSTM(20, input_shape=(3, 2)))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X, y)
Also, on the Keras documentation page https://keras.io/getting-started/sequential-model-guide/, look at the example "Stacked LSTM for sequence classification" near the bottom; it might help.
In general, when using Keras, the batch/sample dimension is not specified in the layers; it is automatically inferred from the input data.
I hope this helps.
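For instance, the model built above leaves the batch dimension as None (a quick check; input_shape and output_shape are standard attributes of a built Sequential model):

print(model.input_shape)    # (None, 3, 2) -- batch size is inferred later
print(model.output_shape)   # (None, 1)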
You have the input shape correct.
I would reshape the input data to be (batch_size, timesteps, features):
m = create_model()
X = np.array(X).reshape((-1, 2, 3))   # -1 lets NumPy infer the first dimension
m.fit(X, y)
Common batch sizes are 4, 8, 16, and 32, but for a small dataset the impact of the batch size is less important.
And when you want to predict, use batch_size = 1.
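If the goal is really the overlapping pairing described in the question (feed X[t-1] and X[t] to classify y[t]), one way to build those windows is sketched below; this is my own addition rather than part of either answer, and the window-building is just plain NumPy slicing:

import numpy as np

X = np.array([[0, 1, 1], [1, 0, 0], [1, 1, 1], [1, 1, 0]], dtype=np.float32)
y = np.array([1, 0, 1, 0], dtype=np.float32)

timesteps = 2
# sample i is the pair [X[i], X[i+1]], labelled with y[i+1]
X_win = np.stack([X[i:i + timesteps] for i in range(len(X) - timesteps + 1)])
y_win = y[timesteps - 1:]

print(X_win.shape)   # (3, 2, 3) -> (samples, timesteps, features)
print(y_win.shape)   # (3,)
m.fit(X_win, y_win)  # the model from create_model() expects input_shape=(2, 3)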
I am trying to manually implement a very simple RNN using TensorFlow 2. I modeled my code on the example for manually building models on the TensorFlow website. The code, stripped to the bare essentials for this purpose, is:
class ModelSimple(object):
    def __init__(self):
        # Initialize the weight and bias to random values
        self.W = tf.Variable(tf.random.normal([]))
        self.b = tf.Variable(tf.random.normal([]))

    def __call__(self, x):
        return self.W * x + self.b

def loss(predicted_y, target_y):
    return tf.reduce_mean(tf.square(predicted_y - target_y))

NUM_EXAMPLES = 1000
inputs = tf.random.normal(shape=[NUM_EXAMPLES])
outputs = tf.zeros(NUM_EXAMPLES)

model = ModelSimple()
with tf.GradientTape() as t:
    t.watch([model.W, model.b])
    current_loss = loss(model(inputs), outputs)
dW, db = t.gradient(current_loss, [model.W, model.b])
print(dW, db)
This gives nice tensors for dW and db. Then I try to do what I described above
class ModelRNN(object):
    def __init__(self, n_inputs, n_neurons):
        self.n_inputs = n_inputs
        self.n_neurons = n_neurons
        # weights for new input
        self.Wx = tf.Variable(tf.random.normal(shape=[self.n_inputs, self.n_neurons], dtype=tf.float32))
        # weights for previous output
        self.Wy = tf.Variable(tf.random.normal(shape=[self.n_neurons, self.n_neurons], dtype=tf.float32))
        # bias weights
        self.b = tf.Variable(tf.zeros([1, self.n_neurons], dtype=tf.float32))

    def __call__(self, X_batch):
        # get shape of input
        batch_size, num_time_steps, _ = X_batch.get_shape()
        # we will loop through the time steps and the output of the previous
        # computation feeds into the next one.
        # this variable keeps track of it and is initialized to zero
        y_last = tf.Variable(tf.zeros([batch_size, self.n_neurons], dtype=tf.float32))
        # the outputs will be stored in this tensor
        Ys = tf.Variable(tf.zeros([batch_size, num_time_steps, self.n_neurons], dtype=tf.float32))
        for t in range(num_time_steps):
            Xt = X_batch[:, t, :]
            yt = tf.tanh(tf.matmul(y_last, self.Wy) +
                         tf.matmul(Xt, self.Wx) +
                         self.b)
            y_last.assign(yt)
            Ys[:, t, :].assign(yt)
        return Ys
inputs = tf.convert_to_tensor(np.array([
    # t = 0       t = 1
    [[0, 1, 2], [9, 8, 7]],  # instance 1
    [[3, 4, 5], [0, 0, 0]],  # instance 2
    [[6, 7, 8], [6, 5, 4]],  # instance 3
    [[9, 0, 1], [3, 2, 1]],  # instance 4
], dtype=np.float32))
outputs = tf.Variable(tf.zeros((4, 2, 5), dtype=np.float32))

model = ModelRNN(3, 5)
with tf.GradientTape() as t:
    t.watch([model.Wx, model.Wy, model.b])
    current_loss = loss(model(inputs), outputs)
dWx, dWy, db = t.gradient(current_loss, [model.Wx, model.Wy, model.b])
print(dWx, dWy, db)
and it turns out dWx, dWy, db are all None. I have tried several things (including watching them with the GradientTape, despite them already being variables), yet I keep getting None. What am I doing wrong?
It looks like this is related to this issue:
Tensorflow cannot get gradient wrt a Variable, but can wrt a Tensor
Replacing the assign calls with a Python list and tf.stack results in gradients being returned (writing the outputs into a Variable with assign severs the link between the returned tensor and Wx, Wy and b, so the tape has nothing to differentiate):
Ys = []
for t in range(num_time_steps):
    Xt = X_batch[:, t, :]
    yt = tf.tanh(tf.matmul(y_last, self.Wy) +
                 tf.matmul(Xt, self.Wx) +
                 self.b)
    y_last.assign(yt)
    Ys.append(yt)
return tf.stack(Ys, axis=1)
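With that change inside ModelRNN.__call__, rerunning the tape code from the question should now give actual tensors rather than None (a quick sanity check, reusing the inputs, outputs and loss defined above):

model = ModelRNN(3, 5)
with tf.GradientTape() as t:
    t.watch([model.Wx, model.Wy, model.b])
    current_loss = loss(model(inputs), outputs)
dWx, dWy, db = t.gradient(current_loss, [model.Wx, model.Wy, model.b])
print(dWx is None, dWy is None, db is None)   # False False False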
From the accepted answer in this question, given the following input and kernel matrices, the output of tf.nn.conv2d is

[[14  6]
 [ 6 12]]
which makes sense. However, when I make the input and kernel matrices have 3 channels each (by repeating each original matrix) and run the same code:
# the previous input
i_grey = np.array([
[4, 3, 1, 0],
[2, 1, 0, 1],
[1, 2, 4, 1],
[3, 1, 0, 2]
])
# copy to 3-dimensions
i_rgb = np.repeat( np.expand_dims(i_grey, axis=0), 3, axis=0 )
# convert to tensor
i_rgb = tf.constant(i_rgb, dtype=tf.float32)
# make kernel depth match input; same process as input
k = np.array([
[1, 0, 1],
[2, 1, 0],
[0, 0, 1]
])
k_rgb = np.repeat( np.expand_dims(k, axis=0), 3, axis=0 )
# convert to tensor
k_rgb = tf.constant(k_rgb, dtype=tf.float32)
At this point my input and kernel matrices, i_rgb and k_rgb, have shapes (3, 4, 4) and (3, 3, 3) respectively.
# reshape input to format: [batch, in_height, in_width, in_channels]
image_rgb = tf.reshape(i_rgb, [1, 4, 4, 3])
# reshape kernel to format: [filter_height, filter_width, in_channels, out_channels]
kernel_rgb = tf.reshape(k_rgb, [3, 3, 3, 1])
conv_rgb = tf.squeeze( tf.nn.conv2d(image_rgb, kernel_rgb, [1,1,1,1], "VALID") )
with tf.Session() as sess:
    conv_result = sess.run(conv_rgb)
    print(conv_result)
I get the final output:
[[35. 15.]
[35. 26.]]
But I was expecting the original output*3:
[[42. 18.]
[18. 36.]]
because from my understanding, each channel of the kernel is convolved with each channel of the input, and the resultant matrices are summed to get the final output.
Am I missing something from this process or the tensorflow implementation?
Reshape is a tricky function. It will produce the shape you want, but it can easily scramble the data in the process, so in cases like yours one should avoid reshape if at all possible.
In this particular case it is better to duplicate the arrays along a new axis instead. With the [batch, in_height, in_width, in_channels] layout, channels is the last dimension, so that is the axis to pass to repeat(). The following code should better reflect the logic behind it:
i_grey = np.expand_dims(i_grey, axis=0) # add batch dim
i_grey = np.expand_dims(i_grey, axis=3) # add channel dim
i_rgb = np.repeat(i_grey, 3, axis=3 ) # duplicate along channels dim
And likewise with filters:
k = np.expand_dims(k, axis=2) # input channels dim
k = np.expand_dims(k, axis=3) # output channels dim
k_rgb = np.repeat(k, 3, axis=2) # duplicate along the input channels dim
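Putting the corrected duplication together with the convolution from the question (a minimal sketch using the same TF1-style session; the result is the single-channel output from above times three):

import numpy as np
import tensorflow as tf

i_grey = np.array([[4, 3, 1, 0],
                   [2, 1, 0, 1],
                   [1, 2, 4, 1],
                   [3, 1, 0, 2]])
k = np.array([[1, 0, 1],
              [2, 1, 0],
              [0, 0, 1]])

# image: (1, 4, 4, 3) -- batch, height, width, channels
image_rgb = np.repeat(i_grey[np.newaxis, :, :, np.newaxis], 3, axis=3).astype(np.float32)
# kernel: (3, 3, 3, 1) -- height, width, in_channels, out_channels
kernel_rgb = np.repeat(k[:, :, np.newaxis, np.newaxis], 3, axis=2).astype(np.float32)

conv_rgb = tf.squeeze(tf.nn.conv2d(image_rgb, kernel_rgb, [1, 1, 1, 1], "VALID"))
with tf.Session() as sess:
    print(sess.run(conv_rgb))
    # [[42. 18.]
    #  [18. 36.]]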
I ran the code from the second answer to the question "tensorflow batch normalization" and got an error.
[error screenshot omitted]
When you run the op, you must provide values for the placeholders via feed_dict.
Here is an example program:
import tensorflow as tf

# Define the inputs you will feed into the tensorflow computation graph
a = tf.placeholder(tf.int32, shape=[1], name="a")
x = tf.placeholder(tf.int32, shape=[4], name="x")

# This is the actual computation we want to run.
output = a * x

with tf.Session() as sess:
    # Actually run the computation, feeding in [10] for a, and [1, 2, 3, 4] for x.
    # This will print out: [10 20 30 40]
    print(sess.run(output, feed_dict={a: [10], x: [1, 2, 3, 4]}))
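For comparison, running the op without feeding the placeholders is exactly what triggers this kind of error: TensorFlow raises an InvalidArgumentError asking for a value for the placeholder (a minimal sketch using the same graph as above):

with tf.Session() as sess:
    # No feed_dict: raises InvalidArgumentError,
    # "You must feed a value for placeholder tensor 'a' ..."
    sess.run(output)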
I have two embedding tensors, A and B, which look like
[
[1,1,1],
[1,1,1]
]
and
[
[0,0,0],
[1,1,1]
]
What I want to do is calculate the (squared) L2 distance d(A, B) row by row.
First I did a tf.square(tf.sub(lhs, rhs)) to get
[
[1,1,1],
[0,0,0]
]
and then I want to do a per-row reduce which returns
[
3,
0
]
but tf.reduce_sum does not allow me to reduce by row. Any input would be appreciated. Thanks.
Add the reduction_indices argument with a value of 1, e.g.:
tf.reduce_sum(tf.square(tf.sub(lhs, rhs)), 1)
That should produce the result you're looking for. Here is the documentation on reduce_sum().
According to the TensorFlow documentation, the reduce_sum function has the following signature:
tf.reduce_sum(input_tensor, axis=None, keep_dims=False, name=None, reduction_indices=None)
But reduction_indices has been deprecated, so it is better to use axis instead. If axis is not set, all dimensions are reduced.
As an example, this is taken from the documentation:
# 'x' is [[1, 1, 1]
# [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
The above requirement can be written in this manner:
import numpy as np
import tensorflow as tf

a = np.array([[1, 7, 1], [1, 1, 1]])
b = np.array([[0, 0, 0], [1, 1, 1]])

xtr = tf.placeholder("float", [None, 3])
xte = tf.placeholder("float", [None, 3])

pred = tf.reduce_sum(tf.square(tf.subtract(xtr, xte)), 1)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    nn_index = sess.run(pred, feed_dict={xtr: a, xte: b})
    print(nn_index)