Evaluating the squared term of a gaussian kernel for having a covariance matrix for multi-dimensional inputs [duplicate] - numpy

I have the following code. It is taking forever in Python. There must be a way to translate this calculation into a broadcast...
def euclidean_square(a, b):
    squares = np.zeros((a.shape[0], b.shape[0]))
    for i in range(squares.shape[0]):
        for j in range(squares.shape[1]):
            diff = a[i, :] - b[j, :]
            sqr = diff**2.0
            squares[i, j] = np.sum(sqr)
    return squares

You can use np.einsum after calculating the differences in a broadcasted way, like so -
ab = a[:,None,:] - b
out = np.einsum('ijk,ijk->ij',ab,ab)
Or use scipy's cdist with its optional metric argument set as 'sqeuclidean' to give us the squared euclidean distances as needed for our problem, like so -
from scipy.spatial.distance import cdist
out = cdist(a,b,'sqeuclidean')
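As a quick sanity check (a minimal sketch with random inputs; a and b are assumed to be 2-D float arrays), the broadcast/einsum route and cdist should agree to floating-point tolerance:
import numpy as np
from scipy.spatial.distance import cdist

a = np.random.rand(100, 3)
b = np.random.rand(80, 3)

ab = a[:, None, :] - b
out_einsum = np.einsum('ijk,ijk->ij', ab, ab)
out_cdist = cdist(a, b, 'sqeuclidean')

# both compute the full matrix of squared euclidean distances
assert np.allclose(out_einsum, out_cdist)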

I collected the different methods proposed here and in two other questions, and measured their speed:
import numpy as np
import scipy.spatial
import sklearn.metrics
def dist_direct(x, y):
    d = np.expand_dims(x, -2) - y
    return np.sum(np.square(d), axis=-1)

def dist_einsum(x, y):
    d = np.expand_dims(x, -2) - y
    return np.einsum('ijk,ijk->ij', d, d)

def dist_scipy(x, y):
    return scipy.spatial.distance.cdist(x, y, "sqeuclidean")

def dist_sklearn(x, y):
    return sklearn.metrics.pairwise.pairwise_distances(x, y, "sqeuclidean")

def dist_layers(x, y):
    res = np.zeros((x.shape[0], y.shape[0]))
    for i in range(x.shape[1]):
        res += np.subtract.outer(x[:, i], y[:, i])**2
    return res

# inspired by the excellent https://github.com/droyed/eucl_dist
def dist_ext1(x, y):
    nx, p = x.shape
    x_ext = np.empty((nx, 3*p))
    x_ext[:, :p] = 1
    x_ext[:, p:2*p] = x
    x_ext[:, 2*p:] = np.square(x)
    ny = y.shape[0]
    y_ext = np.empty((3*p, ny))
    y_ext[:p] = np.square(y).T
    y_ext[p:2*p] = -2*y.T
    y_ext[2*p:] = 1
    return x_ext.dot(y_ext)

# https://stackoverflow.com/a/47877630/648741
def dist_ext2(x, y):
    return np.einsum('ij,ij->i', x, x)[:, None] + np.einsum('ij,ij->i', y, y) - 2 * x.dot(y.T)
I use timeit to compare the speed of the different methods. For the comparison, I use vectors of length 10, with 100 vectors in the first group, and 1000 vectors in the second group.
import timeit

p = 10
x = np.random.standard_normal((100, p))
y = np.random.standard_normal((1000, p))

for method in dir():
    if not method.startswith("dist_"):
        continue
    # total seconds for 1000 runs equals milliseconds per run
    t = timeit.timeit(f"{method}(x, y)", number=1000, globals=globals())
    print(f"{method:12} {t:5.2f}ms")
On my laptop, the results are as follows:
dist_direct 5.07ms
dist_einsum 3.43ms
dist_ext1 0.20ms <-- fastest
dist_ext2 0.35ms
dist_layers 2.82ms
dist_scipy 0.60ms
dist_sklearn 0.67ms
While the two methods dist_ext1 and dist_ext2, both based on the idea of writing (x-y)**2 as x**2 - 2*x*y + y**2, are very fast, there is a downside: When the distance between x and y is very small, due to cancellation error the numerical result can sometimes be (very slightly) negative.
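If that ever matters downstream (e.g. before taking a square root for a Gaussian kernel), a minimal guard (a sketch, not part of the benchmark) is to clamp at zero:
out = dist_ext2(x, y)
# cancellation can leave tiny negatives like -1e-12; clamp before any sqrt
out = np.maximum(out, 0.0)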

Another solution besides using cdist is the following (the same idea as dist_layers in the benchmark above):
difference_squared = np.zeros((a.shape[0], b.shape[0]))
for dimension_iterator in range(a.shape[1]):
    difference_squared += np.subtract.outer(a[:, dimension_iterator], b[:, dimension_iterator])**2

Related

How to do the optimization for a mean squares error in a Python code faster

(I'm new to stack overflow, but I will try to write my problem the best way I can)
For my thesis, I need to solve a mean squared error optimization problem as fast as possible. For this problem, I have been using the scipy.optimize.minimize method (with and without jacobian). However, the optimization is still too slow for what we want to do. (This program is running on mac with python 3.9.)
So first, this is the function to minimize (I already tried to simplify the formula, but it didn't change the speed of the program):
def _residuals_mse(coef, unshimmed_vec, coil_mat, factor):
    """ Objective function to minimize the mean squared error (MSE)

    Args:
        coef (numpy.ndarray): 1D array of channel coefficients
        unshimmed_vec (numpy.ndarray): 1D flattened array (point)
        coil_mat (numpy.ndarray): 2D flattened array (point, channel) of masked coils
                                  (axis 0 must align with unshimmed_vec)
        factor (float): Divide the result by 'factor'. This allows scaling the output
                        for the minimize function to avoid positive directional linesearch

    Returns:
        scalar: Residual for least squares optimization
    """
    # MSE regularized to minimize currents
    return np.mean((unshimmed_vec + np.sum(coil_mat * coef, axis=1, keepdims=False)) ** 2) / factor \
        + (self.reg_factor * np.mean(np.abs(coef) / self.reg_factor_channel))
This is the jacobian of the function (there may be a way to make it faster, but I didn't succeed):
def _residuals_mse_jacobian(coef, unshimmed_vec, coil_mat, factor):
    """ Jacobian of the function that we want to minimize; note that normally b is calculated somewhere else

    Args:
        coef (numpy.ndarray): 1D array of channel coefficients
        unshimmed_vec (numpy.ndarray): 1D flattened array (point) of the masked unshimmed map
        coil_mat (numpy.ndarray): 2D flattened array (point, channel) of masked coils
                                  (axis 0 must align with unshimmed_vec)
        factor (float): integer

    Returns:
        jacobian (numpy.ndarray): 1D array of the gradient of the mse function to minimize
    """
    b = 2 / (unshimmed_vec.size * factor)
    jacobian = np.array([
        b * np.sum((unshimmed_vec + np.matmul(coil_mat, coef)) * coil_mat[:, j]) +
        np.sign(coef[j]) * (self.reg_factor / (9 * self.reg_factor_channel[j]))
        for j in range(coef.size)
    ])
    return jacobian
And so this is the "main" program
import numpy as np
import scipy.optimize as opt
from numpy.random import default_rng

rand = default_rng(seed=0)
reg_factor_channel = rand.integers(1, 10, size=9)
coef = np.zeros(9)
unshimmed_vec = np.random.randint(100, size=(150))
coil_mat = np.random.randint(100, size=(150, 9))
factor = 2
self.reg_factor = 5
currents_sp = opt.minimize(_residuals_mse, coef,
                           args=(unshimmed_vec, coil_mat, factor),
                           method='SLSQP',
                           jac=_residuals_mse_jacobian,
                           options={'maxiter': 1000})
On my computer, the optimization takes around 40 ms for a dataset of this size.
The matrices in the example are usually obtained after some modifications and can be much bigger, but here, to keep things clear and easy to test, I chose some arbitrary ones. In addition, this optimization is done many times (sometimes up to 50 times), so we are already using multiprocessing (to run different optimizations at the same time). However, on mac, multiprocessing is slow to start because of the spawn start method (fork is not stable on python 3.9). For this reason, I am trying to make each optimization as fast as possible, to maybe remove multiprocessing.
Do any of you know how to make this code faster in Python? Also, this code will be released as open source, so I can only use free solvers (unlike MOSEK).
Edit: I tried to rewrite the problem with the CVXPY model, adding this code after the one just above:
import cvxpy as cp

m = currents_0.size
n = unshimmed_vec.size
coef = cp.Variable(m)
unshimmed_vec2 = cp.Parameter((n))
coil_mat2 = cp.Parameter((n, m))
unshimmed_vec2.value = unshimmed_vec
coil_mat2.value = coil_mat
x1 = unshimmed_vec2 + cp.matmul(coil_mat2, coef)
x2 = cp.sum_squares(x1) / (factor*n)
x3 = self.reg_factor / self.reg_factor_channel  # cp.abs(coef) / m
obj = cp.Minimize(x2 + x3)
prob = cp.Problem(obj)
prob.solve(solver=cp.SCS)
However, this slows my code down even more, and it gives a different value than scipy.optimize.minimize, so does anyone see a problem in this code?
I will make some sweeping assumptions:
that we can ignore _criteria_func and instead optimize _residuals_mse;
that none of this needs to be in a class;
that, unlike in your example, reg_factor_channel will never have zeros; and
that your bounds and constraints can all be ignored (though you have not made this clear).
Recognize that your inner expressions can be simplified:
np.sum(coil_mat * coef, axis=1), since it uses a broadcast, is really just a matrix multiplication (see the quick check after this list)
mean(**2) on a vector is really just a self-dot-product divided by the length
Some of your scalar factors and vector coefficients can be combined outside of the function
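As a quick check of the first equivalence (a sketch with random data, not the author's arrays):
import numpy as np

rng = np.random.default_rng(0)
coil_mat = rng.standard_normal((150, 9))
coef = rng.standard_normal(9)

# broadcasting then summing over the channel axis is a matrix-vector product
assert np.allclose(np.sum(coil_mat * coef, axis=1), coil_mat @ coef)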
This leaves us with the following, starting without the Jacobian:
import numpy as np
from numpy.random import default_rng
from scipy import optimize as opt
from timeit import timeit

rand = default_rng(seed=0)
reg_factor = 5
reg_factor_channel = rand.integers(1, 10, size=9)
reg_vector = reg_factor / len(reg_factor_channel) / reg_factor_channel

def residuals_mse(
    coef: np.ndarray,
    unshimmed_vec: np.ndarray,
    coil_mat: np.ndarray,
    factor: float,
) -> float:
    inner = unshimmed_vec + coil_mat @ coef
    return inner.dot(inner)/len(inner)/factor + np.abs(coef).dot(reg_vector)

def old_residuals_mse(coef, unshimmed_vec, coil_mat, factor):
    return np.mean(
        (unshimmed_vec + np.sum(coil_mat * coef, axis=1, keepdims=False)) ** 2) / factor + (
        reg_factor * np.mean(np.abs(coef) / reg_factor_channel))

def main() -> None:
    unshimmed_vec = rand.integers(100, size=150)
    coil_mat = rand.integers(100, size=(150, 9))
    factor = 2
    args = unshimmed_vec, coil_mat, factor

    currents_sp = None

    def run():
        nonlocal currents_sp
        currents_sp = opt.minimize(
            fun=residuals_mse,
            x0=np.zeros_like(reg_factor_channel),
            args=args,
            method='SLSQP',
        )

    t = timeit(run, number=1)
    print(currents_sp)
    print(t, 'seconds')

    r_old = old_residuals_mse(currents_sp.x, *args)
    assert np.isclose(r_old, currents_sp.fun)

if __name__ == '__main__':
    main()
with output
message: Optimization terminated successfully
success: True
status: 0
fun: 435.166150155064
x: [-1.546e-01 -8.305e-02 -1.637e-01 -1.106e-01 -1.033e-01
-8.792e-02 -9.908e-02 -8.666e-02 -1.217e-01]
nit: 7
jac: [-1.179e-01 -1.621e-01 -1.112e-01 -1.765e-01 -1.678e-01
-1.570e-01 -1.456e-01 -1.722e-01 -1.299e-01]
nfev: 94
njev: 7
0.012324300012551248 seconds
The Jacobian does indeed help, but has been written in a way that is not properly vectorised. Once vectorised it looks like:
import numpy as np
from numpy.random import default_rng
from scipy import optimize as opt
from timeit import timeit

rand = default_rng(seed=0)
reg_factor = 5
reg_factor_channel = rand.integers(1, 10, size=9)
reg_vector = reg_factor / len(reg_factor_channel) / reg_factor_channel

def residuals_mse(
    coef: np.ndarray,
    unshimmed_vec: np.ndarray,
    coil_mat: np.ndarray,
    factor: float,
) -> float:
    inner = unshimmed_vec + coil_mat @ coef
    return inner.dot(inner)/len(inner)/factor + np.abs(coef).dot(reg_vector)

def old_residuals_mse(coef, unshimmed_vec, coil_mat, factor):
    return np.mean(
        (unshimmed_vec + np.sum(coil_mat * coef, axis=1, keepdims=False)) ** 2) / factor + (
        reg_factor * np.mean(np.abs(coef) / reg_factor_channel))

def residuals_mse_jacobian(
    coef: np.ndarray,
    unshimmed_vec: np.ndarray,
    coil_mat: np.ndarray,
    factor: float,
) -> np.ndarray:
    b = 2 / unshimmed_vec.size / factor
    return b * (
        unshimmed_vec + coil_mat @ coef
    ) @ coil_mat + np.sign(coef) * reg_vector

def old_residuals_mse_jacobian(coef, unshimmed_vec, coil_mat, factor):
    b = (2 / (unshimmed_vec.size * factor))
    jacobian = np.array([
        b * np.sum((unshimmed_vec + np.matmul(coil_mat, coef)) * coil_mat[:, j]) +
        np.sign(coef[j]) * (reg_factor / (9 * reg_factor_channel[j]))
        for j in range(coef.size)
    ])
    return jacobian

def main() -> None:
    unshimmed_vec = rand.integers(100, size=150)
    coil_mat = rand.integers(100, size=(150, 9))
    factor = 2
    args = unshimmed_vec, coil_mat, factor

    currents_sp = None

    def run():
        nonlocal currents_sp
        currents_sp = opt.minimize(
            fun=residuals_mse,
            x0=np.zeros_like(reg_factor_channel),
            args=args,
            method='SLSQP',
            jac=residuals_mse_jacobian,
        )

    t = timeit(run, number=1)
    print(currents_sp)
    print(t, 'seconds')

    r_old = old_residuals_mse(currents_sp.x, *args)
    assert np.isclose(r_old, currents_sp.fun)
    j_new = residuals_mse_jacobian(currents_sp.x, *args)
    j_old = old_residuals_mse_jacobian(currents_sp.x, *args)
    assert np.allclose(j_old, j_new)

if __name__ == '__main__':
    main()
message: Optimization terminated successfully
success: True
status: 0
fun: 435.1661470650057
x: [-1.546e-01 -8.305e-02 -1.637e-01 -1.106e-01 -1.033e-01
-8.792e-02 -9.908e-02 -8.666e-02 -1.217e-01]
nit: 7
jac: [-4.396e-02 -8.791e-02 -3.385e-02 -9.817e-02 -9.516e-02
-8.223e-02 -7.154e-02 -9.907e-02 -5.939e-02]
nfev: 31
njev: 7
0.005059799994342029 seconds
I would suggest trying the library NLOpt. It also has SLSQP as nonlinear solver (among many others), and I found it to be faster in many instances than SciPy optimize.
However, you're talking roughly 50 ms per run; you won't get down to 5 ms.
If you're looking to squeeze out as much performance as possible, I would probably go to the metal and re-implement the objective function and Jacobian in Fortran (or C), then use f2py (or Cython) to bridge them to Python. That looks like a bit of overkill to me, though.
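For illustration, a minimal sketch of how the same objective might be wired into NLopt's Python bindings; it reuses residuals_mse and residuals_mse_jacobian from the answer above, and the exact option names should be checked against the NLopt docs:
import nlopt
import numpy as np

# dimension matches the 9 channel coefficients used throughout
opt = nlopt.opt(nlopt.LD_SLSQP, 9)

def objective(coef, grad):
    # NLopt passes an output array for the gradient when the solver needs it
    if grad.size > 0:
        grad[:] = residuals_mse_jacobian(coef, unshimmed_vec, coil_mat, factor)
    return float(residuals_mse(coef, unshimmed_vec, coil_mat, factor))

opt.set_min_objective(objective)
opt.set_xtol_rel(1e-6)
x_opt = opt.optimize(np.zeros(9))
print(x_opt, opt.last_optimum_value())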

Scipy Spatial Distance Sub-module rejects Numpy Array

I have a dataframe named "df" with 4 columns. Three columns are independent variables: x1, x2, and x3; the other variable, y, is the dependent variable.
I would like to calculate the distance, "pdist", between the dependent variable and each of the independent variables, so I first converted each column to a numpy array as follows:
y = df[["y"]].values
x1 = df[["x1"]].values
x2 = df[["x2"]].values
x3 = df[["x3"]].values
When I feed these arrays through this coding pipeline I got from GitHub:
import copy

import numpy as np
from scipy.spatial.distance import pdist, squareform

def distance_correlation(Xval, Yval, pval=True, nruns=500):
    X, Y = np.atleast_1d(Xval), np.atleast_1d(Yval)
    if np.prod(X.shape) == len(X): X = X[:, None]
    if np.prod(Y.shape) == len(Y): Y = Y[:, None]
    X, Y = np.atleast_2d(X), np.atleast_2d(Y)
    n = X.shape[0]
    if Y.shape[0] != X.shape[0]: raise ValueError('Number of samples must match')
    a, b = squareform(pdist(X)), squareform(pdist(Y))
    A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
    B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
    dcov2_xy = (A * B).sum() / float(n * n)
    dcov2_xx = (A * A).sum() / float(n * n)
    dcov2_yy = (B * B).sum() / float(n * n)
    dcor = np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))
    if pval:
        greater = 0
        for i in range(nruns):
            Y_r = copy.copy(Yval)
            np.random.shuffle(Y_r)
            if distance_correlation(Xval, Y_r, pval=False) > dcor:
                greater += 1
        return (dcor, greater / float(nruns))
    else:
        return dcor

distance_correlation(x1, y, pval=True, nruns=500)
I get this error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-32-c720c9df4e97> in <module>
----> 1 distance_correlation(bop_sp500, price, pval=True, nruns=500)
<ipython-input-17-e0b3aea12c32> in distance_correlation(Xval, Yval, pval, nruns)
9 n = X.shape[0]
10 if Y.shape[0] != X.shape[0]:raise ValueError('Number of samples must match')
---> 11 a, b = squareform(pdist(X)),squareform(pdist(Y))
12 A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
13 B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
~\Anaconda3\lib\site-packages\scipy\spatial\distance.py in pdist(X, metric, *args, **kwargs)
1997 s = X.shape
1998 if len(s) != 2:
-> 1999 raise ValueError('A 2-dimensional array must be passed.')
2000
2001 m, n = s
ValueError: A 2-dimensional array must be passed.
Could anyone identify where I am going wrong? I know the error originates from the manner in which I created my numpy arrays, but I have no clue how to fix it.
Please explain it with examples that use my variable definitions. I am new to Python.
Ok, so I finally managed to figure out the cause of the problem I faced:
The Numpy array that was being fed into the helper function was a 2-D array, while the helper function requires a "Numpy vector", i.e. a 1-D Numpy array.
The best way to create it is to use the numpy.ravel() function. Hence, for my datasets, the code would be as follows (I have broken down the steps for simplicity):
# Create Arrays
y = df[["y"]].values
x1 = df[["x1"]].values
x2 = df[["x2"]].values
x3 = df[["x3"]].values
# Ravel Them
y = y.ravel()
x1 = x1.ravel()
x2 = x2.ravel()
x3 = x3.ravel()
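To see why this matters, here is a small sketch with a toy dataframe (not the original data): double brackets keep a DataFrame, so .values is 2-D, while ravel() gives the 1-D shape the pdist-based helper expects.
import pandas as pd

df = pd.DataFrame({"y": [1, 2, 3], "x1": [4, 5, 6]})

print(df[["y"]].values.shape)          # (3, 1) -- 2-D, trips up pdist
print(df[["y"]].values.ravel().shape)  # (3,)   -- 1-D, what the helper needs
print(df["y"].values.shape)            # (3,)   -- single brackets are 1-D already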

Efficient implementation of factorization machine with matrix operations?

Link is here : https://www.csie.ntu.edu.tw/~r01922136/slides/ffm.pdf (slides 5-6)
Given the following matrices:
X : n * d
W : d * k
Is there an efficient way to calculate the n x 1 output using only matrix operations (e.g. numpy, tensorflow), where the jth element is the quadratic interaction term output[j] = sum over a, b of x[j,a] * x[j,b] * wwt[a,b], with wwt built from W as in the attempt below?
EDIT:
Current attempt is this, but obviously it's not very space efficient, as it requires storing matrices of size n*d*d :
n = 1000
d = 256
k = 32
x = np.random.normal(size=[n,d])
w = np.random.normal(size=[d,k])
xxt = np.matmul(x.reshape([n,d,1]),x.reshape([n,1,d]))
wwt = np.matmul(w.reshape([1,d,k]),w.reshape([1,k,d]))
output = xxt*wwt
output = np.sum(output,(1,2))
Avoid large temporary arrays
Not all algorithms are that easy or obvious to vectorize. The np.sum(xxt*wwt, (1,2)) step can be rewritten using np.einsum, as sketched below. This should be faster than your solution, but has some other limitations (e.g. no multithreading).
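A minimal einsum version (a sketch reusing x, w, d and k from the question; it keeps the question's reshape convention and never materializes the n*d*d intermediate):
# note: reshape as in the question, not a transpose
wwt2d = np.matmul(w.reshape((d, k)), w.reshape((k, d)))
output = np.einsum('na,nb,ab->n', x, x, wwt2d)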
I would therefore suggest using a compiler like Numba.
Example
import numpy as np
import numba as nb
import time

@nb.njit(fastmath=True, parallel=True)
def factorization_nb(w, x):
    n = x.shape[0]
    d = x.shape[1]
    k = w.shape[1]
    output = np.empty(n, dtype=w.dtype)
    wwt = np.dot(w.reshape((d, k)), w.reshape((k, d)))
    for i in nb.prange(n):
        acc = 0.
        for j in range(d):
            for jj in range(d):
                acc += x[i, j]*x[i, jj]*wwt[j, jj]
        output[i] = acc
    return output

def factorization_orig(w, x):
    n = x.shape[0]
    d = x.shape[1]
    k = w.shape[1]
    xxt = np.matmul(x.reshape([n, d, 1]), x.reshape([n, 1, d]))
    wwt = np.matmul(w.reshape([1, d, k]), w.reshape([1, k, d]))
    output = xxt*wwt
    output = np.sum(output, (1, 2))
    return output
Measuring Performance
n = 1000
d = 256
k = 32

x = np.random.normal(size=[n, d])
w = np.random.normal(size=[d, k])

# first call has some compilation overhead
res_1 = factorization_nb(w, x)

t1 = time.time()
for i in range(100):
    res_1 = factorization_nb(w, x)
    #res_2 = factorization_orig(w, x)
print(time.time()-t1)
Timings
factorization_nb:   4.2 ms per iteration
factorization_orig: 460 ms per iteration (so the Numba version is ~110x faster)
For an einsum implementation in PyTorch, it would be something like
V = torch.randn([50, 10])
x = torch.randn([50])
result = (torch.einsum('ik,jk,i,j->', V, V, x, x)-torch.einsum('ik,ik,i,i->', V, V, x, x))/2
where we subtract the contribution from the feature weight being dotted with itself.

`scipy.optimize` functions hang even with `maxiter=0`

I am trying to train the MNIST data (which I downloaded from Kaggle) with simple multi-class logistic regression, but the scipy.optimize functions hang.
Here's the code:
import csv
from math import exp
from numpy import *
from scipy.optimize import fmin, fmin_cg, fmin_powell, fmin_bfgs

# Prepare the data

def getIiter(ifname):
    """
    Get the iterator from a csv file with filename ifname
    """
    ifile = open(ifname, 'r')
    iiter = csv.reader(ifile)
    iiter.__next__()
    return iiter

def parseRow(s):
    y = [int(x) for x in s]
    lab = y[0]
    z = y[1:]
    return (lab, z)

def getAllRows(ifname):
    iiter = getIiter(ifname)
    x = []
    l = []
    for row in iiter:
        lab, z = parseRow(row)
        x.append(z)
        l.append(lab)
    return x, l

def cutData(x, y):
    """
    70% training
    30% testing
    """
    m = len(x)
    t = int(m * .7)
    return [(x[:t], y[:t]), (x[t:], y[t:])]

def num2IndMat(l):
    t = array(l)
    tt = [vectorize(int)((t == i)) for i in range(10)]
    return array(tt).T

def readData(ifname):
    x, l = getAllRows(ifname)
    t = [[1] + y for y in x]
    return array(t), num2IndMat(l)

# Calculate the cost function

def sigmoid(x):
    return 1 / (1 + exp(-x))

vSigmoid = vectorize(sigmoid)
vLog = vectorize(log)

def costFunction(theta, x, y):
    sigxt = vSigmoid(dot(x, theta))
    cm = (- y * vLog(sigxt) - (1 - y) * vLog(1 - sigxt)) / m / N
    return sum(cm)

def unflatten(flatTheta):
    return [flatTheta[i * N : (i + 1) * N] for i in range(n + 1)]

def costFunctionFlatTheta(flatTheta):
    return costFunction(unflatten(flatTheta), trainX, trainY)

def costFunctionFlatTheta1(flatTheta):
    return costFunction(flatTheta.reshape(785, 10), trainX, trainY)

x, y = readData('train.csv')
[(trainX, trainY), (testX, testY)] = cutData(x, y)

m = len(trainX)
n = len(trainX[0]) - 1
N = len(trainY[0])

initTheta = zeros(((n + 1), N))
flatInitTheta = ndarray.flatten(initTheta)
flatInitTheta1 = initTheta.reshape(1, -1)
In the last two lines we flatten initTheta because the fmin{,_cg,_bfgs,_powell} functions seem to only take vectors as the initial value argument x0. I also flattened initTheta using reshape, in the hope that this answer would be of help.
There is no problem computing the cost function which takes up less than 2 seconds on my computer:
print(costFunctionFlatTheta(flatInitTheta), costFunctionFlatTheta1(flatInitTheta1))
# 0.69314718056 0.69314718056
But all the fmin functions hang, even if I set maxiter=0.
e.g.
newFlatTheta = fmin(costFunctionFlatTheta, flatInitTheta, maxiter=0)
or
newFlatTheta1 = fmin(costFunctionFlatTheta1, flatInitTheta1, maxiter=0)
When I interrupt the program, it seems to me it all hangs at lines in optimize.py calling the cost functions, lines like this:
return function(*(wrapper_args + args))
For example, if I use fmin_cg, this would be line 292 in optimize.py (Version 0.5).
How do I solve this problem?
OK I found a way to stop fmin_cg from hanging.
Basically I just need to write a function that computes the gradient of the cost function, and pass it to the fprime parameter of fmin_cg.
def gradient(theta, x, y):
    return dot(x.T, vSigmoid(dot(x, theta)) - y) / m / N

def gradientFlatTheta(flatTheta):
    return ndarray.flatten(gradient(flatTheta.reshape(785, 10), trainX, trainY))
Then
newFlatTheta = fmin_cg(costFunctionFlatTheta, flatInitTheta, fprime=gradientFlatTheta, maxiter=0)
terminates within seconds, and setting maxiter to a higher number (say 100) lets one train the model within a reasonable amount of time.
The documentation of fmin_cg says the gradient would be numerically computed if no fprime is given, which is what I suspect caused the hanging.
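That suspicion is easy to sanity-check (a sketch using scipy.optimize.approx_fprime, which does the same one-evaluation-per-parameter forward differencing): with 785 * 10 = 7850 parameters, a single numerical gradient costs about 7851 cost-function evaluations, and at ~2 seconds per evaluation that is hours of work before the first iteration even finishes.
from scipy.optimize import approx_fprime
import numpy as np

eps = np.sqrt(np.finfo(float).eps)
# one forward-difference evaluation per parameter: ~7851 calls in total
g = approx_fprime(flatInitTheta, costFunctionFlatTheta, eps)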
Thanks to this notebook by zgo2016#Kaggle which helped me find the solution.

hessian of a variable returned by tf.concat() is None

Let x and y be vectors of length N, and z is a function z = f(x,y). In Tensorflow v1.0.0, tf.hessians(z,x) and tf.hessians(z,y) both returns an N by N matrix, which is what I expected.
However, when I concatenate x and y into a vector p of size 2*N using tf.concat and run tf.hessians(z, p), it returns the error "ValueError: None values not supported."
I understand this is because in the computation graph the edges run x, y -> z and x, y -> p, so there is no gradient path between p and z. To circumvent the problem, I could create p first and then slice it into x and y, but I would have to change a ton of my code. Is there a more elegant way?
related question: Slice of a variable returns gradient None
import tensorflow as tf
import numpy as np
N = 2
A = tf.Variable(np.random.rand(N,N).astype(np.float32))
B = tf.Variable(np.random.rand(N,N).astype(np.float32))
x = tf.Variable(tf.random_normal([N]) )
y = tf.Variable(tf.random_normal([N]) )
#reshape to N by 1
x_1 = tf.reshape(x,[N,1])
y_1 = tf.reshape(y,[N,1])
#concat x and y to form a vector with length of 2*N
p = tf.concat([x,y],axis = 0)
#define the function
z = 0.5*tf.matmul(tf.matmul(tf.transpose(x_1), A), x_1) + 0.5*tf.matmul(tf.matmul(tf.transpose(y_1), B), y_1) + 100
#works , hx and hy are both N by N matrix
hx = tf.hessians(z,x)
hy = tf.hessians(z,y)
#this gives error "ValueError: None values not supported."
#expecting a matrix of size 2*N by 2*N
hp = tf.hessians(z,p)
Compute the hessian by its definition.
gxy = tf.gradients(z, [x, y])
gp = tf.concat([gxy[0], gxy[1]], axis=0)
hp = []
for i in range(2*N):
    hp.append(tf.gradients(gp[i], [x, y]))
Because tf.gradients computes the sum of (dy/dx), when computing the second partial derivative one should slice the vector into scalars and then compute the gradient of each. Tested on tf1.0 and python2.
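To assemble the result into an actual 2*N by 2*N matrix, a sketch along the same lines (same TF1-style API as above) is:
# concatenate each row's [d/dx, d/dy] pieces, then stack the rows
hp_rows = [tf.concat(tf.gradients(gp[i], [x, y]), axis=0) for i in range(2*N)]
hp_matrix = tf.stack(hp_rows, axis=0)  # shape (2*N, 2*N)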