How to compute a weighted average (mean) in TensorFlow - tensorflow2.0

From the example data below I need sum_product(x, y) / sum(y), with x and y as inputs. This part of the model could probably even be trainable=False, but in any case: is there a simpler way to do such a calculation (either from a single tensor, or at least from separate tensors for the values and the weights)?
Is there a more elegant graph for this task than the one I've created?
The only thing I could come up with is this rather long code for such a simple operation:
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

x = np.array([[1100, 1200, 1300, 1400]])  # values
y = np.array([[10, 50, 30, 5]])           # weights

inpS = tf.keras.layers.Input(shape=(4,), batch_size=1, name='inp1', dtype='float32')  # values
inpW = tf.keras.layers.Input(shape=(4,), batch_size=1, name='inp2', dtype='float32')  # weights
dot_product = tf.keras.layers.Dot(axes=1, normalize=False, trainable=False)([inpS, inpW])  # sum_product(x, y)
wsum = tf.keras.layers.Lambda(lambda z: K.sum(z, axis=1, keepdims=True))(inpW)             # sum(y)
con = tf.keras.layers.Concatenate(axis=-1)([dot_product, wsum])  # pack both values into one tensor for the final Lambda
wa = tf.keras.layers.Lambda(lambda t: t[0][0] / t[0][1])(con)    # sum_product(x, y) / sum(y)
model = tf.keras.Model([inpS, inpW], wa)
model.predict([x, y])
The expected result is:
(1100*10 + 1200*50 + 1300*30 + 1400*5) / (10 + 50 + 30 + 5) = 117000 / 95 = 1231.5789794921875

It turns out this is really easy to simplify:
Weighted_Av_Mean = tf.reduce_sum(weights * x) / tf.reduce_sum(weights)
tf.print(Weighted_Av_Mean)
Solved thanks to a directive in the comments.
Or, alternatively, with tf.nn.weighted_moments, which returns both the weighted mean and the weighted variance:
# cast the x & weights numpy arrays to float32 first
x = tf.dtypes.cast(x, tf.float32)
weights = tf.dtypes.cast(weights, tf.float32)
# gives mean & variance
WAMean2 = tf.nn.weighted_moments(
    x, axes=[1], frequency_weights=weights, keepdims=False, name=None
)
tf.print('mean: ', WAMean2[0])
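For completeness, here is a small self-contained check of both approaches against the hand calculation above (a sketch assuming eager TF2, not code from the original answers):
import tensorflow as tf

x = tf.constant([[1100., 1200., 1300., 1400.]])  # values
w = tf.constant([[10., 50., 30., 5.]])           # weights

# reduce_sum version
wmean = tf.reduce_sum(w * x) / tf.reduce_sum(w)
tf.print(wmean)  # ~1231.579

# tf.nn.weighted_moments returns (weighted mean, weighted variance)
mean, variance = tf.nn.weighted_moments(x, axes=[1], frequency_weights=w)
tf.print(mean)   # ~[1231.579]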

Related

Implementation difference between TensorFlow LSTMBlockFusedCell and PyTorch LSTM

I am attempting to translate a TensorFlow LSTMBlockFusedCell model to a PyTorch LSTM, but I'm not getting the same outputs with identical input and weights in both models. I believe this is due to how the weights are being set for the torch model; in the code snippet below, the TensorFlow weight has the shape (400, 164), while the PyTorch weights have the shapes (400, 64) and (400, 100) for torch_lstm.weight_ih_l0 and torch_lstm.weight_hh_l0 respectively. I addressed this inconsistency by using the first 64 elements as weight_ih_l0 and the following 100 elements as weight_hh_l0. According to this article, TensorFlow uses right multiplication instead of PyTorch's left multiplication, which is why I need to transpose the weights. I am also setting the bias to 0 (rendering it useless) for debugging.
import tensorflow as tf
import numpy as np
import torch
time_len, batch_size, input_size, num_units = 50, 1, 64, 100 # L, N, Hin, Hout with torch semantics
# setup tensorflow LSTM
tf_lstm = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units)
inp = tf.placeholder(tf.float32, shape=(time_len, batch_size, input_size))
out, c = tf_lstm(inp, dtype=tf.float32)
tf_weight = tf_lstm.weights[0]
tf_bias = tf_lstm.weights[1]
# initialize weights
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# tf forward pass
a = np.random.randn(time_len, batch_size, input_size).astype(np.float32) # input
b = np.zeros(tf_bias.shape) # set lstm bias to zero
tf_out, lstm_weight, lstm_bias = sess.run([out, tf_weight, tf_bias], {inp: a, tf_bias: b})
assert (lstm_bias == 0).all() # make sure lstm bias was 0
# setup pytorch LSTM
torch_lstm = torch.nn.LSTM(input_size=input_size, hidden_size=num_units, num_layers=1, bias=False)
# set torch weights same as tensorflow weights (is this correct?)
w1 = lstm_weight[:input_size, :]  # first 64 rows (input weights)
w2 = lstm_weight[input_size:, :]  # remaining 100 rows (recurrent weights)
torch_lstm.weight_ih_l0.data = torch.tensor(w1.T) # transpose and set first weight
torch_lstm.weight_hh_l0.data = torch.tensor(w2.T) # transpose and set second weight
# torch forward pass
torch_out, (hn, cn) = torch_lstm(torch.tensor(a))
torch_out = torch_out.detach().numpy() # convert to numpy for compatibility
# compare
assert torch_out.shape == tf_out.shape
print("np.allclose(torch_out, tf_out) = ", np.allclose(torch_out, tf_out))
print("normalized difference: ", np.linalg.norm(torch_out - tf_out))
output:
np.allclose(torch_out, tf_out) = False
normalized difference: 10.741002
Expected output:
np.allclose(torch_out, tf_out) = True
normalized difference: ~0.0
I am running on cpu with the following dependencies:
numpy==1.21.5
tensorflow-gpu==1.14.0
torch==1.11.0
I am running TensorFlow v1; the CPU version should also work, and the Python wheel is available here for Python <= 3.7.
Any help is appreciated.
I believe I solved this by changing the order of the weights associated with each gate and setting forget_bias=0.0 in LSTMBlockFusedCell:
import tensorflow as tf
import numpy as np
import torch
import itertools as it
time_len, batch_size, input_size, num_units = 50, 1, 64, 100 # L, N, Hin, Hout with torch semantics
# setup tensorflow LSTM
tf_lstm = tf.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, forget_bias=0.0, dtype=tf.float32)
inp = tf.placeholder(tf.float32, shape=(time_len, batch_size, input_size))
out, c = tf_lstm(inp, dtype=tf.float32)
tf_weight = tf_lstm.weights[0]
tf_bias = tf_lstm.weights[1]
# initialize weights
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# tf forward pass
a = np.random.randn(*inp.shape).astype(np.float32) # input
b = np.zeros(tf_bias.shape) # set lstm bias to zero
tf_out, lstm_weight, lstm_bias = sess.run([out, tf_weight, tf_bias], {inp: a, tf_bias: b})
assert (lstm_bias == 0).all() # make sure lstm bias was 0
# setup pytorch LSTM
torch_lstm = torch.nn.LSTM(input_size=input_size, hidden_size=num_units, num_layers=1, bias=False)
# weights associated with each gate
i = lstm_weight[:, 0:100].copy(), 'i'
f = lstm_weight[:, 100:200].copy(), 'f'
o = lstm_weight[:, 200:300].copy(), 'o'
g = lstm_weight[:, 300:400].copy(), 'g'
for i, f, o, g in it.permutations([i, f, o, g], 4):
    print(*[x[1] for x in (i, f, o, g)])
    i, f, o, g = (x[0] for x in (i, f, o, g))
    lstm_weight = np.concatenate([i, f, o, g], axis=1)
    # set torch weights same as tensorflow weights
    w1 = lstm_weight[:input_size, :]  # first 64 rows (input weights)
    w2 = lstm_weight[input_size:, :]  # remaining 100 rows (recurrent weights)
    torch_lstm.weight_ih_l0.data = torch.tensor(w1.T)  # transpose and set first weight
    torch_lstm.weight_hh_l0.data = torch.tensor(w2.T)  # transpose and set second weight
    # torch forward pass
    torch_out, (hn, cn) = torch_lstm(torch.tensor(a))
    torch_out = torch_out.detach().numpy()  # convert to numpy for comparison
    # compare
    assert torch_out.shape == tf_out.shape
    print("np.allclose(torch_out, tf_out) = ", np.allclose(torch_out, tf_out))
    print("normalized difference: ", np.linalg.norm(torch_out - tf_out))
This prints the difference for every permutation of the gate weights; the combination i o f g gave a difference of 1.7814435e-06, which is close enough.
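Based on the winning permutation, the remapping can also be done directly instead of searching all 24 orderings. The sketch below is a hypothetical helper, not from the original post; it assumes TensorFlow's LSTMBlock kernel packs the gates as (i, c, f, o) while PyTorch expects (i, f, g, o), which is consistent with the result above.
import numpy as np
import torch

def tf_block_kernel_to_torch(kernel, input_size, num_units):
    """Hypothetical helper: reorder a TensorFlow LSTMBlockFusedCell kernel of
    shape (input_size + num_units, 4 * num_units) into PyTorch's
    weight_ih_l0 / weight_hh_l0 layout.
    Assumes TF gate order (i, c, f, o) and PyTorch gate order (i, f, g, o)."""
    n = num_units
    i_g = kernel[:, 0 * n:1 * n]  # input gate
    c_g = kernel[:, 1 * n:2 * n]  # cell candidate (PyTorch's "g")
    f_g = kernel[:, 2 * n:3 * n]  # forget gate
    o_g = kernel[:, 3 * n:4 * n]  # output gate
    reordered = np.concatenate([i_g, f_g, c_g, o_g], axis=1)  # -> (i, f, g, o)
    w_ih = torch.tensor(reordered[:input_size, :].T)  # (4*num_units, input_size)
    w_hh = torch.tensor(reordered[input_size:, :].T)  # (4*num_units, num_units)
    return w_ih, w_hh

# Usage with the variables from the snippet above:
# w_ih, w_hh = tf_block_kernel_to_torch(lstm_weight, input_size, num_units)
# torch_lstm.weight_ih_l0.data = w_ih
# torch_lstm.weight_hh_l0.data = w_hh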

Unexpected input data type. Actual: (tensor(double)) , expected: (tensor(float))

I am learning the ONNX framework, which allows us to deploy deep learning (and other) models into production.
However, there is one thing I am missing. I thought that the main reason for having such a framework is inference: when we have a trained model and want to use it in a different venv (where, for example, we cannot have PyTorch), the model can still be used.
I have prepared a "from scratch" example here:
# Modules
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
import torchvision
import onnx
import onnxruntime
import matplotlib.pyplot as plt
import numpy as np
# %config Completer.use_jedi = False
# MNIST Example dataset
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST(
        'data', train=True, download=True,
        transform=torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])),
    batch_size=800)
# Take data and labels "by hand"
inputs_batch, labels_batch = next(iter(train_loader))
# Simple Model
class CNN(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=10, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.fc1 = nn.Linear(16*7*7, num_classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)
        return x
# Training setting
device = 'cpu'
batch_size = 64
learning_rate = 0.001
n_epochs = 10
# Dataset prep
dataset = TensorDataset(inputs_batch, labels_batch)
TRAIN_DF = DataLoader(dataset = dataset, batch_size = batch_size, shuffle = True)
# Model Init
model = CNN(in_channels=1, num_classes=10)
optimizer = optim.Adam(model.parameters(), lr = learning_rate)
# Training Loop
for epoch in range(n_epochs):
    for data, labels in TRAIN_DF:
        model.train()
        # Send data and labels to the device
        data = data.to(device)
        labels = labels.to(device)
        # data = data.reshape(data.shape[0], -1)
        # Forward
        pred = model(data)
        loss = F.cross_entropy(pred, labels)
        # Backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
# Check Accuracy
def check_accuracy(loader, model):
    num_correct = 0
    num_total = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            # x = x.reshape(x.shape[0], -1)
            scores = model(x)
            _, pred = scores.max(1)
            num_correct += (pred == y).sum()
            num_total += pred.size(0)
    print(F"Got {num_correct} / {num_total} with accuracy {float(num_correct)/float(num_total)*100: .2f}")
check_accuracy(TRAIN_DF, model)
# Inference with ONNX
# Create artificial data of the same size
img_size = 28
dummy_data = torch.randn(1, img_size, img_size)
dummy_input = torch.autograd.Variable(dummy_data).unsqueeze(0)
input_name = "input"
output_name = "output"
model_eval = model.eval()
torch.onnx.export(
    model_eval,
    dummy_input,
    "model_CNN.onnx",
    input_names=["input"],
    output_names=["output"],
)
# Take Random Image from Training Data
X_pred = data[4].unsqueeze(0)
# Convert the Tensor image to PURE numpy and pretend we are working in venv where we only have numpy - NO PYTORCH
X_pred_np = X_pred.numpy()
X_pred_np = np.array(X_pred_np)
IMG_Rando = np.random.rand(1, 1, 28, 28)
np.shape(X_pred_np) == np.shape(IMG_Rando)
ort_session = onnxruntime.InferenceSession(
"model_CNN.onnx"
)
def to_numpy(tensor):
    return (
        tensor.detach().cpu().numpy()
        if tensor.requires_grad
        else tensor.cpu().numpy()
    )
# compute ONNX Runtime output prediction
# WORKS
# ort_inputs = {ort_session.get_inputs()[0].name: X_pred_np}
# DOES NOT WORK
ort_inputs = {ort_session.get_inputs()[0].name: IMG_Rando}
# WORKS
# ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(X_pred)}
ort_outs = ort_session.run(None, ort_inputs)
ort_outs
Firstly, we create a simple model and train it on the MNIST dataset.
Then we export the trained model using the ONNX framework.
Now, when I want to classify an image using X_pred_np, it works even though it is "pure" NumPy, which is what I want.
However, I suspect this particular case works only because it has been derived from a PyTorch tensor object, and thus "under the hood" it still carries PyTorch attributes.
When I try to run inference on the random "pure" NumPy object IMG_Rando, there is a problem:
Unexpected input data type. Actual: (tensor(double)) , expected: (tensor(float))
suggesting that the PyTorch form is needed.
Is there a way to use only NumPy images for the ONNX predictions, so that inference can be performed in a separate venv where no PyTorch is installed?
Secondly, is there a way for ONNX to remember the actual classes?
In this particular case, the index corresponds to the label of the image. In an animal classifier, however, ONNX would not give us labels such as "DOG" and "CAT"; it would only give us the index of the predicted label, which we would need to run through our own "prediction dictionary" to know that, say, the fifth label is associated with "cat" and the sixth with "dog".
NumPy defaults to float64 while PyTorch defaults to float32. Cast the input to float32 before running inference:
IMG_Rando = np.random.rand(1, 1, 28, 28).astype(np.float32)
double is short for double-precision floating-point format, which is a floating point number representation on 64 bits, while float refers to a floating point number on 32 bits.
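A quick way to see the two defaults for yourself (a minimal check, not part of the original answer):
import numpy as np
import torch

print(np.random.rand(1).dtype)  # float64 -- NumPy's default floating type
print(torch.randn(1).dtype)     # torch.float32 -- PyTorch's default floating type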
As an improvement to the accepted answer, the idiomatic way to generate random numbers in Numpy is now by using a Generator. This offers the benefit of being able to create the array in the right type directly, rather than using the expensive astype operation, which copies the array (as in the accepted answer). Thus, the improved solution would look like:
rng = np.random.default_rng() # set seed if desired
IMG_Rando = rng.random((1, 1, 28, 28), dtype=np.float32)
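Regarding the second question (recovering the actual class names): the exported graph only returns raw class scores, so one simple option is to keep an index-to-label mapping next to the model and apply it to the argmax of the ONNX Runtime output. ONNX models can also carry arbitrary key/value metadata, but a plain list kept with the deployment code is enough here. A minimal sketch reusing ort_outs from above; for this MNIST model the labels are just the digits 0-9, and the class_names list is illustrative:
import numpy as np

# Index-to-label mapping kept alongside the exported model.
# For an animal classifier this would be e.g. ["cat", "dog", ...].
class_names = [str(d) for d in range(10)]

scores = ort_outs[0]                            # shape (1, 10): raw scores from the ONNX model
pred_index = int(np.argmax(scores, axis=1)[0])  # index of the most likely class
print("predicted label:", class_names[pred_index])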

Visualizing self attention weights for sequence addition problem with LSTM?

I am using the Self Attention layer from here for a simple problem of adding all the numbers in a sequence that come before a delimiter. With training, I expect the neural network to learn which numbers to add, and by using the Self Attention layer I expect to visualize where the model is focusing. The code to reproduce the results is the following:
import os
import sys
import matplotlib.pyplot as plt
import numpy
import numpy as np
from keract import get_activations
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.layers import Dense, Dropout, LSTM
from attention import Attention # https://github.com/philipperemy/keras-attention-mechanism
def add_numbers_before_delimiter(n: int, seq_length: int, delimiter: float = 0.0,
                                 index_1: int = None) -> (np.array, np.array):
    """
    Task: Add all the numbers that come before the delimiter.
    x = [1, 2, 3, 0, 4, 5, 6, 7, 8, 9]. Result is y = 6.
    :param n: number of samples in (x, y).
    :param seq_length: length of the sequence of x.
    :param delimiter: value of the delimiter. Default is 0.0
    :param index_1: index of the number that comes after the first 0.
    :return: returns two numpy.array x and y of shape (n, seq_length, 1) and (n, 1).
    """
    x = np.random.uniform(0, 1, (n, seq_length))
    y = np.zeros(shape=(n, 1))
    for i in range(len(x)):
        if index_1 is None:
            a = np.random.choice(range(1, len(x[i])), size=1, replace=False)
        else:
            a = index_1
        y[i] = np.sum(x[i, 0:a])
        x[i, a] = delimiter
    x = np.expand_dims(x, axis=-1)
    return x, y


def main():
    numpy.random.seed(7)
    # data. definition of the problem.
    seq_length = 20
    x_train, y_train = add_numbers_before_delimiter(20_000, seq_length)
    x_val, y_val = add_numbers_before_delimiter(4_000, seq_length)
    # just arbitrary values. it's for visual purposes. easier to see than random values.
    test_index_1 = 4
    x_test, _ = add_numbers_before_delimiter(10, seq_length, 0, test_index_1)
    # x_test_mask is just a mask that, if applied to x_test, would still contain the information to solve the problem.
    # we expect the attention map to look like this mask.
    x_test_mask = np.zeros_like(x_test[..., 0])
    x_test_mask[:, test_index_1:test_index_1 + 1] = 1

    model = Sequential([
        LSTM(100, input_shape=(seq_length, 1), return_sequences=True),
        Attention(name='attention_weight'),  # imported above as `from attention import Attention`
        Dropout(0.2),
        Dense(1, activation='linear')
    ])

    model.compile(loss='mse', optimizer='adam')
    print(model.summary())

    output_dir = 'task_add_two_numbers'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    max_epoch = int(sys.argv[1]) if len(sys.argv) > 1 else 200

    class VisualiseAttentionMap(Callback):

        def on_epoch_end(self, epoch, logs=None):
            attention_map = get_activations(model, x_test, layer_names='attention_weight')['attention_weight']
            # top is attention map.
            # bottom is ground truth.
            plt.imshow(np.concatenate([attention_map, x_test_mask]), cmap='hot')
            iteration_no = str(epoch).zfill(3)
            plt.axis('off')
            plt.title(f'Iteration {iteration_no} / {max_epoch}')
            plt.savefig(f'{output_dir}/epoch_{iteration_no}.png')
            plt.close()
            plt.clf()

    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=max_epoch,
              batch_size=64, callbacks=[VisualiseAttentionMap()])


if __name__ == '__main__':
    main()
However, the attention weights I get are not what I expect (see the images linked in the original post, which show the weights during training).
I expect the attention to focus on all values before the delimiter. In the visualisations, the white lower half represents the ground-truth mask, while the upper half represents the attention weights for the 10 test samples.

Exploding LOSS in TensorFlow 2.0 Linear Regression Example using GradientTape

I'm trying to construct a small educational example for multivariate linear regression, but the loss keeps increasing until it explodes rather than getting smaller. Any idea why?
import tensorflow as tf
tf.__version__
import numpy as np

data = np.array(
    [
        [100, 35, 35, 12, 0.32],
        [101, 46, 35, 21, 0.34],
        [130, 56, 46, 3412, 12.42],
        [131, 58, 48, 3542, 13.43]
    ]
)

x = data[:, 1:-1]
y_target = data[:, -1]

# w, b and linear_model(x) = b + tf.tensordot(x, w, axes=1) are defined as in the full example further below

def loss_function(y, pred):
    return tf.reduce_mean(tf.square(y - pred))

def train(b, w, x, y, lr=0.012):
    with tf.GradientTape() as t:
        current_loss = loss_function(y, linear_model(x))
    lr_weight, lr_bias = t.gradient(current_loss, [w, b])
    w.assign_sub(lr * lr_weight)
    b.assign_sub(lr * lr_bias)

epochs = 80
for epoch_count in range(epochs):
    real_loss = loss_function(y_target, linear_model(x))
    train(b, w, x, y_target, lr=0.12)
    print(f"Epoch count {epoch_count}: Loss value: {real_loss.numpy()}")
This even happens if I initialize the weights with the "correct" values (found out via a scikit-learn regressor)
w = tf.Variable([-1.76770250e-04,3.46688912e-01,2.43827475e-03],dtype=tf.float64)
b = tf.Variable(-11.837184241807234,dtype=tf.float64)
Here's how you might use a TF2 optimizer for a toy example (as per the comment). I know this is not the answer, but I didn't want to post it in the comments section, as it would mess up the indentation.
tf_x = tf.Variable(tf.constant(2.0, dtype=tf.float32), name='x')
optimizer = tf.optimizers.SGD(learning_rate=0.1)

# Optimizing tf_x using gradient tape
x_series, y_series = [], []
for step in range(5):
    x_series.append(tf_x.numpy().item())
    with tf.GradientTape() as tape:
        tf_y = tf_x**2
    gradients = tape.gradient(tf_y, tf_x)
    optimizer.apply_gradients(zip([gradients], [tf_x]))
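As a quick sanity check of that snippet (the values below are my own back-of-the-envelope numbers, not output from the original answer): with a start value of 2.0, a learning rate of 0.1 and gradient 2x, each step applies x <- x - 0.1 * 2x = 0.8 * x, so:
print(x_series)  # approximately [2.0, 1.6, 1.28, 1.024, 0.8192]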
Based on @thushv89's input, I'm providing here an intermediate solution using a TF2 optimizer which works, although it does not answer my question 100%:
import tensorflow as tf
tf.__version__
import numpy as np

data = np.array(
    [
        [100, 35, 35, 12, 0.32],
        [101, 46, 35, 21, 0.34],
        [130, 56, 46, 3412, 12.42],
        [131, 58, 48, 3542, 13.43]
    ]
)

x = data[:, 1:-1]
y_target = data[:, -1]

w = tf.Variable([1, 1, 1], dtype=tf.float64)
b = tf.Variable(1, dtype=tf.float64)

def linear_model(x):
    return b + tf.tensordot(x, w, axes=1)

optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.MeanSquaredLogarithmicError()

def train_step(x, y):
    with tf.GradientTape() as tape:
        predicted = linear_model(x)
        loss_value = loss_object(y, predicted)
        print(f"Loss Value:{loss_value}")
    grads = tape.gradient(loss_value, [b, w])
    optimizer.apply_gradients(zip(grads, [b, w]))

def train(epochs):
    for epoch in range(epochs):
        train_step(x, y_target)
        print('Epoch {} finished'.format(epoch))

train(epochs=1000)

SGD converges but batch learning does not, simple regression in tensorflow

I have run into an issue where batch learning in TensorFlow fails to converge to the correct solution for a simple convex optimization problem, whereas SGD converges. A small example follows, in both the Julia and Python programming languages; I have verified that exactly the same behaviour results from using TensorFlow from both Julia and Python.
I'm trying to fit the linear model y = s*W + B with parameters W and B.
The cost function is quadratic, so the problem is convex and should be easily solved using a small enough step size. If I feed all the data at once, the end result is just a prediction of the mean of y. If, however, I feed one data point at a time (the commented-out code in the Julia version), the optimization converges to the correct parameters very quickly.
I have also verified that the gradients computed by TensorFlow for the batch example differ from the sum of the gradients computed for each data point individually.
Any ideas where I have gone wrong?
using TensorFlow
s = linspace(1,10,10)
s = [s reverse(s)]
y = s*[1,4] + 2
session = Session(Graph())
s_ = placeholder(Float32, shape=[-1,2])
y_ = placeholder(Float32, shape=[-1,1])
W = Variable(0.01randn(Float32, 2,1), name="weights1")
B = Variable(Float32(1), name="bias3")
q = s_*W + B
loss = reduce_mean((y_ - q).^2)
train_step = train.minimize(train.AdamOptimizer(0.01), loss)

function train_critic(s, targets)
    for i = 1:1000
        # for i = 1:length(y)
        #     run(session, train_step, Dict(s_ => s[i,:]', y_ => targets[i]))
        # end
        ts = run(session, [loss, train_step], Dict(s_ => s, y_ => targets))[1]
        println(ts)
    end
    v = run(session, q, Dict(s_ => s, y_ => targets))
    plot(s[:,1], v, lab="v (Predicted value)")
    plot!(s[:,1], y, lab="y (Correct value)")
    gui();
end
run(session, initialize_all_variables())
train_critic(s, y)
Same code in Python (I'm not a Python user, so this might be ugly):
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets
import tensorflow as tf
from tensorflow.python.framework.ops import reset_default_graph

s = np.linspace(1,10,50).reshape((50,1))
s = np.concatenate((s,s[::-1]),axis=1).astype('float32')
y = np.add(np.matmul(s,[1,4]), 2).astype('float32')

reset_default_graph()
rng = np.random

s_ = tf.placeholder(tf.float32, [None, 2])
y_ = tf.placeholder(tf.float32, [None])

weight_initializer = tf.truncated_normal_initializer(stddev=0.1)
with tf.variable_scope('model'):
    W = tf.get_variable('W', [2, 1],
                        initializer=weight_initializer)
    B = tf.get_variable('B', [1],
                        initializer=tf.constant_initializer(0.0))

q = tf.matmul(s_, W) + B
loss = tf.reduce_mean(tf.square(tf.sub(y_, q)))

optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss)

num_epochs = 200
train_cost = []
with tf.Session() as sess:
    init = tf.initialize_all_variables()
    sess.run(init)
    for e in range(num_epochs):
        feed_dict_train = {s_: s, y_: y}
        fetches_train = [train_op, loss]
        res = sess.run(fetches=fetches_train, feed_dict=feed_dict_train)
        train_cost = [res[1]]
        print train_cost
The answer turned out to be that when I fed in the targets, I fed a vector and not an Nx1 matrix. The operation y_ - q then turned into a broadcast operation, and instead of returning the elementwise difference, it returned an NxN matrix with the desired differences along the diagonal. In Julia, I solved this by modifying the line
train_critic(s,y)
to
train_critic(s, reshape(y, length(y), 1))
to ensure y is a matrix.
A subtle error that took me a very long time to find! Part of the confusion was that TensorFlow seems to treat vectors as row vectors rather than column vectors as Julia does, hence the broadcast in y_ - q.
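The same silent broadcast is easy to reproduce in plain NumPy (a minimal illustration, not part of the original answer): subtracting an (N, 1) column from an (N,) vector produces an N x N matrix rather than an elementwise difference, so the mean squared "error" is computed over the wrong thing.
import numpy as np

y_vec = np.array([1.0, 2.0, 3.0])     # shape (3,)   -- what was fed in
q = np.array([[1.1], [2.1], [3.1]])   # shape (3, 1) -- the model output

diff = y_vec - q
print(diff.shape)          # (3, 3): broadcasting produced a full matrix
print(np.mean(diff ** 2))  # not the intended elementwise MSE

# The fix: make the targets an N x 1 column as well
y_col = y_vec.reshape(-1, 1)
print((y_col - q).shape)          # (3, 1)
print(np.mean((y_col - q) ** 2))  # the intended MSE, ~0.01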