output from tf.tensordot(x,y,2) - tensorflow

I'm trying to understand the collaborative filtering recommender.
One particular part is about tf.tensordot(x, y, 2) in the following code:
def call(self, inputs):
    user_vector = self.user_embedding(inputs[:, 0])
    user_bias = self.user_bias(inputs[:, 0])
    movie_vector = self.movie_embedding(inputs[:, 1])
    movie_bias = self.movie_bias(inputs[:, 1])
    dot_user_movie = tf.tensordot(user_vector, movie_vector, 2)
    # Add all the components (including bias)
    x = dot_user_movie + user_bias + movie_bias
    # The sigmoid activation forces the rating to between 0 and 1
    return tf.nn.sigmoid(x)
dot_user_movie.shape is () but x.shape is (None, 1). Why? I expected dot_user_movie.shape to be (None, 1) as well.
What I really want to do is remove the bias terms, but it gives me an error if I do the following:
x = dot_user_movie
return tf.nn.sigmoid(x)
I tried to mimic the above calculation with explicit values:
emb = layers.Embedding(20, 4)
bias = layers.Embedding(20, 1)
x1 = emb(np.array([0, 2, 1]))
y1 = emb(np.array([3, 5, 1]))
dot_x1_y1 = tf.tensordot(x1, y1, 2)
# <tf.Tensor: shape=(), dtype=float32, numpy=0.0065868925>
bias(np.array([0, 2, 1]))
dot_x1_y1 + bias
## this will cause an error:
## ValueError: Attempt to convert a value
## (<keras.layers.core.embedding.Embedding object at 0x7f7211a59640>)
## with an unsupported type (<class 'keras.layers.core.embedding.Embedding'>)
## to a Tensor.
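For reference, a minimal sketch of why the two shapes differ, assuming user_vector and movie_vector are batched embeddings of shape (batch, dim): tf.tensordot(a, b, 2) contracts over both the batch and embedding axes and returns a single scalar, while a per-example dot product keeps the batch dimension.

import numpy as np
import tensorflow as tf

user_vector = tf.constant(np.random.rand(3, 4), dtype=tf.float32)   # (batch, dim)
movie_vector = tf.constant(np.random.rand(3, 4), dtype=tf.float32)  # (batch, dim)

# axes=2 contracts over both axes -> a single scalar, shape ()
full_contraction = tf.tensordot(user_vector, movie_vector, 2)

# a per-example dot product keeps the batch dimension -> shape (3, 1)
per_example = tf.reduce_sum(user_vector * movie_vector, axis=1, keepdims=True)

print(full_contraction.shape, per_example.shape)  # () (3, 1)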


How to make 2 tensors the same length by mean | median imputation of the shortest tensor?

I'm trying to subclass the base Keras layer to create a layer that merges the rank-1 outputs of 2 layers in a skip connection by outputting the dot product of the 2 tensors. The 2 incoming tensors are created by Dense layers parsed by a Neural Architecture Search algorithm that randomly selects the number of Dense units, and hence the lengths of the 2 tensors, which will usually not be the same. I am running an experiment to see whether casting them to the same length, by appending a mathematically meaningful imputation (e.g. mean | median | hypotenuse | cos | ... etc.) to the shorter tensor and then merging with the dot product, outperforms the Add or Concatenate merging strategies.
My overall strategy to make them the same length is:
Find the shorter tensor.
Pass it to tf.reduce_mean() (aliasing the resulting mean as "rm" for the sake of discussion).
Create a list [rm for _ in range(difference)], where difference is the length of the longer tensor minus the length of the shorter one. Cast it as a tensor if necessary.
[pad | concatenate] the shorter tensor with the result of the operation above to make it equal in length.
Here is where I am running into a dead wall:
Since tf.reduce_mean returns a tensor whose static shape is (None,) (not a known scalar), the padded tensors end up with a shape of (None,), which the tf.keras.layers.Dot layer refuses to ingest and throws a ValueError, as it cannot see that they are the same length, though they always will be:
KerasTensor(type_spec=TensorSpec(shape=(None,), dtype=tf.float32, name=None), name='tf.math.reduce_mean/Mean:0', description="created by layer 'tf.math.reduce_mean'")
ValueError: A Concatenate layer should be called on a list of at least 1 input. Received: input_shape=[[(None,), (None,)], [(None, 3)]]
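(As a quick illustration of the shape behaviour described above, a minimal sketch assuming a plain Keras Input: reduce_mean drops the reduced axis and leaves shape (None,), while keepdims=True keeps a known rank.)

import tensorflow as tf

inp = tf.keras.layers.Input(shape=(3,))
m = tf.reduce_mean(inp, axis=1)                    # KerasTensor with shape (None,)
m_kd = tf.reduce_mean(inp, axis=1, keepdims=True)  # KerasTensor with shape (None, 1)
print(m.shape, m_kd.shape)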
My code (in the package/module):
import tensorflow as tf
import numpy as np


class Linear1dDot(tf.keras.layers.Layer):
    def __init__(self, input_dim=None):
        super(Linear1dDot, self).__init__()

    def __call__(self, inputs):
        max_len = tf.reduce_max(tf.Variable(
            [inp.shape[1] for inp in inputs]))
        print(f"max_len: {max_len}")
        for i in range(len(inputs)):
            inp = inputs[i]
            print(inp.shape)
            inp_lenght = inp.shape[1]
            if inp_lenght < max_len:
                print(f"{inp_lenght} < {max_len}")
                # pad_with = inp.reduce_mean()
                pad_with = tf.reduce_mean(inp, axis=1)
                print(pad_with)
                padding = [pad_with for _ in range(max_len - inp_lenght)]
                inputs[i] = tf.keras.layers.concatenate([padding, [inp]])
                # inputs[i] = tf.reshape(
                #     tf.pad(inp, padding, mode="constant"), (None, max_len))
        print(inputs)
        return tf.keras.layers.Dot(axes=1)(inputs)
...
# Alternatively, substituting the last few lines with:
                pad_with = tf.reduce_mean(inp, axis=1, keepdims=True)
                print(pad_with)
                padding = tf.keras.layers.concatenate(
                    [pad_with for _ in range(max_len - inp_lenght)])
                inputs[i] = tf.keras.layers.concatenate([padding, [inp]])
                # inputs[i] = tf.reshape(
                #     tf.pad(inp, padding, mode="constant"), (None, max_len))
        print(inputs)
        return tf.keras.layers.Dot(axes=1)(inputs)
... and countless other permutations of attempts ...
Does anyone know a workaround or have any advice? (other than 'Don't try to do this.')?
In the parent folder of this module's package, here is a test to simulate a skip connection merging into the current layer:
from linearoneddot.linear_one_d_dot import Linear1dDot
x = tf.constant([1, 2, 3, 4, 5])
y = tf.constant([0, 9, 8])
inp1 = tf.keras.layers.Input(shape=3)
inp2 = tf.keras.layers.Input(shape=5)
xd = tf.keras.layers.Dense(3, "relu")(inp1)
yd = tf.keras.layers.Dense(5, 'elu')(inp2)
combined = Linear1dDot()([xd, yd]) # tf.keras.layers.Dot(axes=1)([xd, yd])
z = tf.keras.layers.Dense(2)(combined)
model = tf.keras.Model(inputs=[inp1, inp2], outputs=z) # outputs=z)
print(model([x, y]))
print(model([np.random.random((3, 3)), np.random.random((3, 5))]))
Does anyone know a workaround to get the mean of the shorter rank-1 tensor as a scalar, which I can then append / pad onto the shorter tensor to reach the intended length (the same length as the longer tensor)?
Try this, hope it will work. Pad the shorter input with ones, concatenate the padding with the input, take the dot product, and finally subtract the extra ones which were added to the dot product...
class Linear1dDot(tf.keras.layers.Layer):
    def __init__(self, **kwargs):
        super(Linear1dDot, self).__init__()

    def __call__(self, inputs):
        _input1, _input2 = inputs
        _input1_shape = _input1.shape[1]
        _input2_shape = _input2.shape[1]
        difference = tf.math.abs(_input1_shape - _input2_shape)
        padded_input = tf.ones(shape=(1, difference))
        if _input1_shape > _input2_shape:
            padded_tensor = tf.concat([_input2, padded_input], axis=1)
            scaled_output = tf.keras.layers.Dot(axes=1)([padded_tensor, _input1])
            scaled_output -= tf.reduce_sum(padded_input)
            return scaled_output
        else:
            padded_tensor = tf.concat([_input1, padded_input], axis=1)
            scaled_output = tf.keras.layers.Dot(axes=1)([padded_tensor, _input2])
            scaled_output -= tf.reduce_sum(padded_input)
            return scaled_output
x = tf.constant([[1, 2, 3, 4, 5, 9]])
y = tf.constant([[0, 9, 8]])
inp1 = tf.keras.layers.Input(shape=3)
inp2 = tf.keras.layers.Input(shape=5)
xd = tf.keras.layers.Dense(5, "relu")(x)
yd = tf.keras.layers.Dense(3, 'elu')(y)
combined = Linear1dDot()([xd, yd]) # tf.keras.layers.Dot(axes=1)([xd, yd])
Output:
<tf.Tensor: shape=(1, 1), dtype=float32, numpy=array([[4.4694786]], dtype=float32)>
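For the original goal of padding with the mean of the shorter tensor rather than with ones, a minimal sketch along the same lines, assuming batched rank-2 inputs with known static lengths (the names are illustrative, not from the code above):

import tensorflow as tf

def pad_shorter_with_mean(a, b):
    # Pad the shorter of two (batch, length) tensors with its own per-example mean.
    len_a, len_b = a.shape[1], b.shape[1]
    if len_a == len_b:
        return a, b
    short, long_ = (a, b) if len_a < len_b else (b, a)
    diff = long_.shape[1] - short.shape[1]
    mean = tf.reduce_mean(short, axis=1, keepdims=True)  # shape (batch, 1)
    padding = tf.repeat(mean, repeats=diff, axis=1)      # shape (batch, diff)
    padded = tf.concat([short, padding], axis=1)         # shape (batch, longer length)
    return (padded, long_) if len_a < len_b else (long_, padded)

a = tf.random.uniform((2, 3))
b = tf.random.uniform((2, 5))
pa, pb = pad_shorter_with_mean(a, b)
dot = tf.reduce_sum(pa * pb, axis=1, keepdims=True)      # per-example dot product, shape (2, 1)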

batch size > 1 gives an error using TensorFlow 1.x

I am using this example of a VAE.
The only difference I made was to change the loss from binary cross entropy to MSE, like this:
class OptimizerVAE(object):
    def __init__(self, model, learning_rate=1e-3):
        """
        OptimizerVAE initializer
        :param model: a model object
        :param learning_rate: float, learning rate of the optimizer
        """
        # mean squared error (changed from binary cross entropy)
        self.bce = tf.keras.losses.mse(model.x, model.logits)
        self.reconstruction_loss = tf.reduce_mean(tf.reduce_sum(self.bce, axis=-1))
        if model.distribution == 'normal':
            # KL divergence between normal approximate posterior and standard normal prior
            self.p_z = tf.distributions.Normal(tf.zeros_like(model.z), tf.ones_like(model.z))
            kl = model.q_z.kl_divergence(self.p_z)
            self.kl = tf.reduce_mean(tf.reduce_sum(kl, axis=-1)) * 0.1
        elif model.distribution == 'vmf':
            # KL divergence between vMF approximate posterior and uniform hyper-spherical prior
            self.p_z = HypersphericalUniform(model.z_dim - 1, dtype=model.x.dtype)
            kl = model.q_z.kl_divergence(self.p_z)
            self.kl = tf.reduce_mean(kl) * 0.1
        else:
            raise NotImplementedError
        self.ELBO = -self.reconstruction_loss - self.kl
        self.train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(-self.ELBO)
        self.print = {'recon loss': self.reconstruction_loss, 'ELBO': self.ELBO, 'KL': self.kl}
When running the original architecture (2 MLP layers), the model runs perfectly, no matter the size of the batches (specified as "None" in the GitHub code).
I am trying to change this to a convolutional model, but when I change just the encoder to this:
def _encoder(self, x):
    """
    Encoder network
    :param x: placeholder for input
    :return: tuple `(z_mean, z_var)` with mean and concentration around the mean
    """
    # 2 hidden layers encoder
    # h0 = tf.layers.dense(x, units=self.h_dim * 2, activation=self.activation)
    # h1 = tf.layers.dense(h0, units=self.h_dim, activation=self.activation)
    h1 = tf.layers.conv1d(x, filters=32, kernel_size=7, activation=tf.nn.relu)
    h1 = tf.layers.conv1d(h1, filters=64, kernel_size=7, activation=tf.nn.relu)
    h1 = tf.layers.conv1d(h1, filters=64, kernel_size=7, activation=tf.nn.relu)
    h1 = tf.layers.flatten(h1)
    h1 = tf.layers.dense(h1, 32, activation=tf.nn.relu)
    if self.distribution == 'normal':
        # compute mean and std of the normal distribution
        z_mean = tf.layers.dense(h1, units=self.z_dim, activation=None, name='z_output')
        z_var = tf.layers.dense(h1, units=self.z_dim, activation=tf.nn.softplus)
    elif self.distribution == 'vmf':
        # compute mean and concentration of the von Mises-Fisher
        z_mean = tf.layers.dense(h1, units=self.z_dim, activation=lambda x: tf.nn.l2_normalize(x, axis=-1))
        # the `+ 1` prevents collapsing behaviors
        z_var = tf.layers.dense(h1, units=1, activation=tf.nn.softplus) + 1
    else:
        raise NotImplementedError
    return z_mean, z_var
and when running the model, I get the error:
InvalidArgumentError: Incompatible shapes: [32,1] vs. [32,512,1]
[[{{node gradients/SquaredDifference_grad/BroadcastGradientArgs}}]]
32 is the batch_size when running the model. The thing that is confusing me is that when I run this with batch_size = 1, the model runs!
Where is this going wrong? Is it the optimizer and the way it averages?
I solved the issue by reshaping the output from the decoder to the form (win_size, 1), since the MLP decoder does not add that extra dimension!
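A minimal sketch of that kind of fix, assuming the decoder output comes out as (batch, win_size) while model.x is (batch, win_size, 1); logits and win_size are the names used in the code and answer above:

# inside the decoder, before the loss is computed (TF 1.x)
logits = tf.expand_dims(logits, axis=-1)  # (batch, win_size) -> (batch, win_size, 1)
# now tf.keras.losses.mse(model.x, model.logits) compares tensors of matching shape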

Weighted Pixel Wise Categorical Cross Entropy for Semantic Segmentation

I have recently started learning about semantic segmentation and am trying to train a UNet for it. My input is RGB 128x128x3 images. My masks are made up of 4 classes (0, 1, 2, 3) and are one-hot encoded with dimension 128x128x4.
def weighted_cce(y_true, y_pred):
    weights = []
    t_inf = tf.convert_to_tensor(1e9, dtype='float32')
    t_zero = tf.convert_to_tensor(0, dtype='int64')
    for i in range(0, 4):
        l = tf.argmax(y_true, axis=-1) == i
        n = tf.cast(tf.math.count_nonzero(l), 'float32') + K.epsilon()
        weights.append(n)
    weights = [batch_size/j for j in weights]
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # clip to prevent NaN's and Inf's
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    # calc
    loss = y_true * K.log(y_pred) * weights
    loss = -K.sum(loss, -1)
    return loss
This is the loss function that I am using but it classifies every pixel as 2. What am I doing wrong?
You should compute weights based on your entire data (unless your batch size is reasonably big, so you have sort of stable weights).
If some class is underrepresented, with a small batch size, it will have near infinity weights.
If your target data is numpy array:
shp = y_train.shape
totalPixels = shp[0] * shp[1] * shp[2]
weights = np.sum(y_train, axis=(0, 1, 2)) #final shape (4,)
weights = totalPixels/weights
If your data is in a Sequence generator:
totalPixels = 0
counts = np.zeros((4,))
for i in range(len(generator)):
    x, y = generator[i]
    shp = y.shape
    totalPixels += shp[0] * shp[1] * shp[2]
    counts = counts + np.sum(y, axis=(0, 1, 2))
weights = totalPixels / counts
If your data is in a yield generator (you must know how many batches you have in an epoch):
for i in range(batches_per_epoch):
    x, y = next(generator)
    # the rest is equal to the Sequence example above
Attempt 1
I don't know if newer versions of Keras are able to handle this, but you can try the simplest approach first: simply call fit or fit_generator with the class_weight argument:
model.fit(...., class_weight = {0: weights[0], 1: weights[1], 2: weights[2], 3: weights[3]})
Attempt 2
Make a healthier loss function:
weights = weights.reshape((1, 1, 1, 4))
kWeights = K.constant(weights)

def weighted_cce(y_true, y_pred):
    yWeights = kWeights * y_pred                        # shape (batch, 128, 128, 4)
    yWeights = K.sum(yWeights, axis=-1)                 # shape (batch, 128, 128)
    loss = K.categorical_crossentropy(y_true, y_pred)   # shape (batch, 128, 128)
    wLoss = yWeights * loss
    return K.sum(wLoss, axis=(1, 2))
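A usage sketch, assuming weights and weighted_cce are defined as above and x_train / y_train are the images and one-hot masks described in the question (the optimizer, batch size, and epoch count are just illustrative):

model.compile(optimizer='adam', loss=weighted_cce, metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16, epochs=10)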

Dynamic output shape is incorrect/ not same as static

I am trying to implement a patch-creation function using TensorFlow's extract_image_patches function, but the dynamic output shape is not the same as I expect.
Let me briefly explain what it does. The input shape is supposed to be
6000x4000. We first reduce it by its greatest common divisor, which gives the aspect ratio (3, 2). Then we pass the argument 64 to our function to create patches of size 3x64, 2x64 = 192, 128. This returns 31x31 distinct patches. Everything works fine with the static output, but when it comes to the dynamic output things are not ok, and I could not find which part causes the different dynamic output.
# input_shape_inbuild: (None, 6000, 4000, 1)
# ---LAYER---
# Input Size: (None, 6000, 4000, 1)
# Patch Size: (x,y) = 192, 128
# Aspect ratio: (3, 2)
!wget https://www.fujifilm.com/products/digital_cameras/x/fujifilm_x_pro2/sample_images/img/index/ff_x_pro2_001.JPG
img = cv2.imread('ff_x_pro2_001.JPG', 0)
img = tf.reshape(img, [1,img.shape[0],img.shape[1],1])
*** TensorFlow takes images as (y, x), so a 6000x4000 image is given as tf.func(4000, 6000).
# Here I define a custom layer in TensorFlow.
class create_patches(Layer):
    def __init__(self, patchMultiplier):
        super(create_patches, self).__init__()
        self.patchMultiplier = patchMultiplier

    def build(self, input_shape):
        print('input_shape_inbuild: ', input_shape)

        def aspect_ratio(width, height):
            # find greatest common divisor of input_shape
            def gcd(x, y):
                while y != 0:
                    (x, y) = (y, x % y)
                return x
            r = gcd(width, height)
            x = int(width / r)
            y = int(height / r)
            return x, y

        self.aspect_ratio = aspect_ratio(input_shape[1], input_shape[2])
        self.patchSize_x = self.aspect_ratio[0] * self.patchMultiplier
        self.patchSize_y = self.aspect_ratio[1] * self.patchMultiplier

    def call(self, inputs):
        print('---LAYER---')
        print('Input Size:', inputs._keras_shape)
        print('Patch Size: (x,y) = {}, {}'.format(self.patchSize_x, self.patchSize_y))
        print('Aspect ratio: {}'.format(self.aspect_ratio))
        print('---LAYER---')
        # call tf.extract_image_patches to return it.
        out = tf.extract_image_patches(images=inputs,
                                       ksizes=[1, self.patchSize_y, self.patchSize_x, 1],
                                       strides=[1, self.patchSize_y, self.patchSize_x, 1],
                                       rates=[1, 1, 1, 1],
                                       padding='VALID')
        return out

    def compute_output_shape(self, input_shape):
        """
        ksize_cols = patchSize_x
        ksize_rows = patchSize_y
        """
        # output shape = [batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]
        """
        shape = (self.patchSize_x, self.patchSize_y,
                 (input_shape[1]/self.patchSize_x) * (input_shape[2]/self.patchSize_y))
        """
        shape = (input_shape[0],
                 input_shape[1] / self.patchSize_x,   # patch row count
                 input_shape[2] / self.patchSize_y,   # patch col count
                 self.patchSize_x * self.patchSize_y) # patch pixel count
        return shape
#here is input with 6000x4000 pixels.
input_shape_1 = Input(shape=(6000, 4000, 1))
#here I fed input to my custom layer.
x1 = create_patches(64)(input_shape_1)
print('Output shape: ', x1.shape)
# here I build a model to see static output
f = K.function([input_shape_1], [x1])
import numpy as np
#result = f([np.random.randint(256, size=(1,4000,6000,1))])
result = f([img])
#print(result)
result = np.array(result)
# [batch, out_rows, out_cols, ksize_rows * ksize_cols * depth]
# Result shape: (1, 1, 125, 125, 1536)
print('Result shape: ', result.shape, '\n\n')
#print(result[:, :, :, 0].shape)
Here is the output I get:
input_shape_inbuild: (None, 6000, 4000, 1)
---LAYER---
Input Size: (None, 6000, 4000, 1)
Patch Size: (x,y) = 192, 128
Aspect ratio: (3, 2)
---LAYER---
Output shape: (?, 46, 20, 24576)
Result shape: (1, 1, 31, 31, 24576)
The result shape is as I expected, but for the output shape I could not work out where 46 and 20 come from. Could you tell me why it is like this?
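For reference, with padding='VALID' the number of patches per spatial axis follows floor((size - ksize) / stride) + 1. A small sketch of that arithmetic, contrasting the declared Input shape with the shape of the image actually fed in (which, as noted above, is passed as rows x cols = 4000 x 6000):

def valid_patches(size, ksize, stride):
    # number of patches along one axis with padding='VALID'
    return (size - ksize) // stride + 1

# static shape inferred from Input(shape=(6000, 4000, 1)) with ksizes/strides of (128, 192):
print(valid_patches(6000, 128, 128), valid_patches(4000, 192, 192))  # 46 20
# the image actually fed in has rows=4000, cols=6000:
print(valid_patches(4000, 128, 128), valid_patches(6000, 192, 192))  # 31 31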

Predict next number in a pattern

I am trying to write a simple program using TensorFlow to predict the next number in a sequence.
I am not experienced in TensorFlow so instead of starting from scratch I started with this guide: http://monik.in/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow/
However, in contrast to the implementation in the link above I do not want to treat the problem as a classification problem - where I only have n possible outcomes - but instead just calculate a single value for a sequence.
I tried modifying the code to fit my problem:
import numpy as np
import random
from random import shuffle
import tensorflow as tf
NUM_EXAMPLES = 10000
train_input = ['{0:020b}'.format(i) for i in range(2**20)]
shuffle(train_input)
train_input = [map(int,i) for i in train_input]
ti = []
for i in train_input:
    temp_list = []
    for j in i:
        temp_list.append([j])
    ti.append(np.array(temp_list))
train_input = ti
train_output = []
for i in train_input:
    count = 0
    for j in i:
        if j[0] == 1:
            count += 1
    #temp_list = ([0]*21)
    #temp_list[count] = 1
    #train_output.append(temp_list)
    train_output.append(count)
test_input = train_input[NUM_EXAMPLES:]
test_output = train_output[NUM_EXAMPLES:]
train_input = train_input[:NUM_EXAMPLES]
train_output = train_output[:NUM_EXAMPLES]
print "test and training data loaded"
target = tf.placeholder(tf.float32, [None, 1])
data = tf.placeholder(tf.float32, [None, 20,1]) #Number of examples, number of input, dimension of each input
#target = tf.placeholder(tf.float32, [None, 1])
#print('target shape: ', target.get_shape())
#print('shape[0]', target.get_shape()[1])
#print('int(shape) ', int(target.get_shape()[1]))
num_hidden = 24
cell = tf.nn.rnn_cell.LSTMCell(num_hidden)
val, _ = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)
val = tf.transpose(val, [1, 0, 2])
print('val shape, ', val.get_shape())
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))
#prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
prediction = tf.matmul(last, weight) + bias
cross_entropy = -tf.reduce_sum(target - prediction)
optimizer = tf.train.AdamOptimizer()
minimize = optimizer.minimize(cross_entropy)
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
init_op = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init_op)
batch_size = 100
no_of_batches = int(len(train_input)) / batch_size
epoch = 500
for i in range(epoch):
    ptr = 0
    for j in range(no_of_batches):
        inp, out = train_input[ptr:ptr+batch_size], train_output[ptr:ptr+batch_size]
        ptr += batch_size
        sess.run(minimize, {data: inp, target: out})
    print "Epoch ", str(i)
incorrect = sess.run(error, {data: test_input, target: test_output})
#print sess.run(prediction,{data: [[[1],[0],[0],[1],[1],[0],[1],[1],[1],[0],[1],[0],[0],[1],[1],[0],[1],[1],[1],[0]]]})
#print('Epoch {:2d} error {:3.1f}%'.format(i + 1, 100 * incorrect))
sess.close()
It is still a work in progress, since the input is bogus, as is the cross-entropy calculation.
However, my main problem is that the code doesn't run at all.
I get this error:
ValueError: Cannot feed value of shape (100,) for Tensor
u'Placeholder:0', which has shape '(?, 1)'
The number 100 comes from the batch_size, and the (?, 1) comes from the fact that my prediction is a one-dimensional number. However, I have no idea where the problem is in my code.
Can anyone help me get the dimensions to match?
This error means your target placeholder is being fed something with the wrong shape. To fix it, I think you should reshape what you feed, e.g. test_output.reshape([-1, 1]).
To fix the placeholder's shape, change your code to:
for i in range(epoch):
    ptr = 0
    for j in range(no_of_batches):
        inp = train_input[ptr:ptr+batch_size]
        out = train_output[ptr:ptr+batch_size]
        ptr += batch_size
        out = np.reshape(out, (100, 1))  # reshape
        sess.run(minimize, {data: inp, target: out})
    print("Epoch ", str(i))
test_output = np.reshape(test_output, (1038576, 1))  # reshape
incorrect = sess.run(error, {data: test_input, target: test_output})
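Equivalently, you can let NumPy infer the first dimension instead of hard-coding the sizes; this is the same reshape written with -1:

out = np.reshape(out, (-1, 1))                  # instead of (100, 1)
test_output = np.reshape(test_output, (-1, 1))  # instead of (1038576, 1)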