I have the following optimization problem:
Where X and q are endogenous while the other variables are known.
I use scipy's minimize function to solve it, and I have no problems with the bounds and constraints:
import numpy as np
import scipy.optimize as sco

# objective function
def objective(q, s):
    return -sumprod(q, s)

def sumprod(l1, l2):
    return sum(x * y for x, y in zip(l1, l2))

# constraints
def cons_periodicflow_min(q):
    return q.sum() - qpmin
con1 = {'type': 'ineq', 'fun': cons_periodicflow_min}

def cons_periodicflow_max(q):
    return qpmax - q.sum()
con2 = {'type': 'ineq', 'fun': cons_periodicflow_max}

def cons_daily_reservoir(q):  # xmin, q, X, a, delta
    return X + a - q - delta - xmin
con3 = {'type': 'ineq', 'fun': cons_daily_reservoir}

def cons_end_reservoir(q):  # xend, q, X, a, delta
    return X[-1] + a[-1] - q[-1] - delta[-1] - xend
con4 = {'type': 'ineq', 'fun': cons_end_reservoir}

cons = [con1, con2, con3, con4]
# definition of the parameters
T=3
q0 = np.zeros(T)
s0 = np.array([10,10,10])
qmin = [0,0,0]
qmax = [10,10,10]
delta = np.array([1, 1, 1])
a = np.array([2, 2, 2])
X = np.array([10, 0, 0])
qpmax = 50
qpmin=10
b = [(qmin[t],qmax[t]) for t in range(T)]
sol = sco.minimize(objective, q0, args=(s0,), bounds=b, constraints=cons)
My only problem is that X depends on q, so I need to update X at each time step. Can I add this to the minimize call? If not, how should I do it?
EDIT:
I can express X in the following way (please don't mind the t / t+1 issues):
Therefore the constraint with Xmin can be rewritten as:
Does it help to express the optimisation problem?
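To make the dependence concrete, here is a rough sketch of what I mean (it assumes the recursion X[t+1] = X[t] + a[t] - q[t] - delta[t] with X[0] known, reuses the names from the code above, and treats xmin as the daily lower bound on the reservoir level):

def reservoir_levels(q):
    # rebuild the reservoir trajectory implied by the releases q
    X = np.empty(len(q))
    X[0] = 10  # initial level, as in the parameter block above
    for t in range(len(q) - 1):
        X[t + 1] = X[t] + a[t] - q[t] - delta[t]
    return X

def cons_daily_reservoir(q):
    # X is recomputed from q, so the constraint follows the dependence
    X = reservoir_levels(q)
    return X + a - q - delta - xmin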
I am new to TensorFlow and am trying to translate a Stan model into TFP. Here is my TFP model using JointDistributionCoroutineAutoBatched.
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

def make_joint_distribution_coroutine(Depth, N_RNA):
    def model():
        ## c1 prior
        c1 = yield tfd.Gamma(concentration=1.1, rate=0.005)
        ## c2 prior
        c2 = yield tfd.Gamma(concentration=1.1, rate=0.005)
        ## s prior (GammaModeSD and BetaModeConc are my own helper constructors)
        s = yield GammaModeSD(1, 1)
        ## theta prior
        theta = yield tfd.LogNormal(0, s)
        ## p prior
        p = yield BetaModeConc(0.1, c1)
        ## tfp bug, need to cast tensor to float32
        # theta = tf.cast(theta, tf.float32)
        # p = tf.cast(p, tf.float32)
        ## q formula
        q = (theta * p) / (1 - p + theta * p)
        ## qi prior
        qi = yield BetaModeConc(tf.repeat(q, N_RNA), c2)
        ## qi likelihood (probs= makes the intent explicit; the second positional argument would be logits)
        k = yield tfd.Binomial(tf.cast(Depth, tf.float32), probs=qi)
        ## p likelihood
        a = yield tfd.Binomial(tf.cast(Depth, tf.float32), probs=p)
    return tfd.JointDistributionCoroutineAutoBatched(model)
My model generates two different sets of data, a and k. If there were only a or k, then I could specify my log_prob function by
def joint_log_prob(*args):
    return joint.log_prob(*args, likelihood=data)
or
joint_log_prob = lambda *x: joint.log_prob(x + (data,))
But my question is how to incorporate two different sets of data into one log_prob function? Thank you!
The simplest solution would just be specifying both. Assuming data is a tuple:
def joint_log_prob(*args):
    return joint.log_prob(*args, a=data[0], k=data[1])
or
joint_log_prob = lambda *x: joint.log_prob(x + data)
You might also like to write:
joint_log_prob = joint.experimental_pin(a=.., k=..).unnormalized_log_prob
(See JointDistributionPinned)
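A slightly fuller sketch of that last option. It assumes the two observed yields are given names (e.g. by passing name='k' and name='a' to the Binomial constructors) so they can be pinned by keyword; k_obs and a_obs are placeholders for your observed tensors:

joint = make_joint_distribution_coroutine(Depth, N_RNA)

# pin the observed fields; everything left unpinned remains a free latent
pinned = joint.experimental_pin(k=k_obs, a=a_obs)
target_log_prob_fn = pinned.unnormalized_log_prob

# sanity check: evaluate at a draw of the remaining latent variables
latents = pinned.sample_unpinned()
print(target_log_prob_fn(*latents))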
I'm supposed to modify part of a Python script hosted on GitHub. The code implements an attention-based similarity measure, but I want to turn it into a cosine similarity.
The relevant code is in the layers.py file (inside the __call__ method).
Attention-Based:
def __call__(self, inputs):
    x = inputs
    # dropout
    if self.sparse_inputs:
        x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
    else:
        x = tf.nn.dropout(x, 1 - self.dropout)
    # graph learning
    h = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
    N = self.num_nodes
    edge_v = tf.abs(tf.gather(h, self.edge[0]) - tf.gather(h, self.edge[1]))
    edge_v = tf.squeeze(self.act(dot(edge_v, self.vars['a'])))
    sgraph = tf.SparseTensor(indices=tf.transpose(self.edge), values=edge_v, dense_shape=[N, N])
    sgraph = tf.sparse_softmax(sgraph)
    return h, sgraph
I edited the above code to match what I believe are my requirements (cosine similarity). However, when I run the following modified code:
def __call__(self, inputs):
    x = inputs
    # dropout
    if self.sparse_inputs:
        x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
    else:
        x = tf.nn.dropout(x, 1 - self.dropout)
    # graph learning
    h = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
    N = self.num_nodes
    h_norm = tf.nn.l2_normalize(h)
    edge_v = tf.matmul(h_norm, tf.transpose(h_norm))
    h_norm_1 = tf.norm(h_norm)
    edge_v /= h_norm_1 * h_norm_1
    edge_v = dot(edge_v, self.vars['a'])  # It causes an error when I add this line
    zero = tf.constant(0, dtype=tf.float32)
    where = tf.not_equal(edge_v, zero)
    indices = tf.where(where)
    values = tf.gather_nd(edge_v, indices)
    sgraph = tf.SparseTensor(indices, values, dense_shape=[N, N])
    return h, sgraph
The script shows some runtime errors:
Screenshot of error message
I suspect the error here is related to line 226:
edge_v = dot(edge_v, self.vars['a']) # It causes an error when I add this line
Any advice on how to accomplish this successfully?
Link to the script on GitHub:
https://github.com/jiangboahu/GLCN-tf
Note: I don't want to use built-in functions, because I don't think they are precise enough for this job.
ETA: It appears that there are some answers around, but as far as I understood them, they seem to tackle different problems.
Thanks a bunch in advance
What is dot? Have you imported the method?
It should either be:
edge_v = tf.keras.backend.dot(edge_v, self.vars['a'])
or
edge_v = tf.tensordot(edge_v, self.vars['a'], axes=1)
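If edge_v and self.vars['a'] are both dense 2-D tensors, a plain matrix multiplication should also work, for example:

edge_v = tf.matmul(edge_v, self.vars['a'])

Either way, check that the second dimension of edge_v matches the first dimension of self.vars['a']; a shape mismatch at this line is a common cause of errors here.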
cvxpy has a very neat way to write out an optimisation problem without worrying too much about converting it into a "standard" matrix form, as this is done internally. It is best to explain with an example:
import cvxpy as cp

def cvxpy_implementation():
    var1 = cp.Variable()
    var2 = cp.Variable()
    constraints = [
        var1 <= 3,
        var2 >= 2,
    ]
    obj_fun = cp.Minimize(var1**2 + var2**2)
    problem = cp.Problem(obj_fun, constraints)
    problem.solve()
    return var1.value, var2.value
import numpy as np
from scipy.optimize import LinearConstraint, minimize

def scipy_implementation1():
    A = np.diag(np.ones(2))
    lb = np.array([-np.inf, 2])
    ub = np.array([3, np.inf])
    con = LinearConstraint(A, lb, ub)

    def obj_fun(x):
        return (x**2).sum()

    result = minimize(obj_fun, [0, 0], constraints=con)
    return result.x
def scipy_implementation2():
    con = [
        {'type': 'ineq', 'fun': lambda x: 3 - x[0]},
        {'type': 'ineq', 'fun': lambda x: x[1] - 2},
    ]

    def obj_fun(x):
        return (x**2).sum()

    result = minimize(obj_fun, [0, 0], constraints=con)
    return result.x
All of the above give the correct result, but the cvxpy implementation is much "easier" to write out. Specifically, I don't have to worry about the form of the inequalities and can give variables useful names when writing them out. Compare that to the scipy1 and scipy2 implementations: in the first case I have to write out these extra infs, and in the second case I have to remember which variable is which. You can imagine a case where I have 100 variables; while concatenating them will ultimately need to be done, I'd like to be able to write the problem out as in cvxpy.
Question:
Has anyone implemented this for scipy? Or is there an alternative library that could make this work?
Thank you.
I wrote something up that does this and seems to cover the main issues I had in mind.
The general idea is that you define variables, create a simple expression as you would normally write it out, and then the solver class optimises over the defined variables.
https://github.com/evan54/optimisation/blob/master/var.py
The example below illustrates a simple use case
import numpy as np
# Variable and Problem are defined in the var.py module linked above

# fake data
a = 2
m = 3
x = np.linspace(0, 10)
y = a * x + m + np.random.randn(len(x))

a_ = Variable()
m_ = Variable()
y_ = a_ * x + m_
error = y_ - y

prob = Problem((error**2).sum(), None)
prob.minimize()

print(f'a = {a}, a_ = {a_}')
print(f'm = {m}, m_ = {m_}')
I am building machine learning models for a certain data set. Then, based on the constraints and bounds for the outputs and inputs, I am trying to find the input parameters that give the most minimized answer.
The problem I am facing is that when the model is a linear regression or something like lasso, the minimization works perfectly fine.
However, when the model is a decision tree, it constantly returns the initial value it was given. So basically, it does not enforce the constraints.
import numpy as np
import pandas as pd
from scipy.optimize import minimize
I am using the very first sample from the input data set for the optimization. As it is only one sample, I need to reshape it to (1,-1) as well.
x = df_in.iloc[0,:]
x = np.array(x)
x = x.reshape(1,-1)
This is my Objective function:
def objective(x):
    x = np.array(x)
    x = x.reshape(1, -1)
    y = 0
    for n in range(df_out.shape[1]):
        y = Model[n].predict(x)
    Y = y[0]
    return Y
Here I am defining the bounds of inputs:
range_max = pd.DataFrame(range_max)
range_min = pd.DataFrame(range_min)

B_max = []
B_min = []
for i in range(range_max.shape[0]):
    b_max = range_max.iloc[i]
    b_min = range_min.iloc[i]
    B_max.append(b_max)
    B_min.append(b_min)

B_max = pd.DataFrame(B_max)
B_min = pd.DataFrame(B_min)
bnds = pd.concat([B_min, B_max], axis=1)
These are my constraints:
con_min = pd.DataFrame(c_min)
con_max = pd.DataFrame(c_max)
Here I am defining the constraint function:
def const(x):
    x = np.array(x)
    x = x.reshape(1, -1)
    Y = []
    for n in range(df_out.shape[1]):
        y = Model[n].predict(x)[0]
        Y.append(y)
    Y = pd.DataFrame(Y)
    a4 = []
    for k in range(Y.shape[0]):
        a1 = Y.iloc[k, 0] - con_min.iloc[k, 0]
        a2 = con_max.iloc[k, 0] - Y.iloc[k, 0]
        a3 = [a2, a1]
        a4 = np.concatenate([a4, a3])
    return a4

c = const(x)
con = {'type': 'ineq', 'fun': const}
This is where I try to minimize. I do not pick a method, as the automatically selected one has worked so far.
sol = minimize(fun = objective, x0=x,constraints=con, bounds=bnds)
So the actual constraints are:
c_min = [0.20,1000]
c_max = [0.3,1600]
and the max and min range for the boundaries are:
range_max = [285,200,8,85,0.04,1.6,10,3.5,20,-5]
range_min = [215,170,-1,60,0,1,6,2.5,16,-18]
I think you should check the output of sol. At times the algorithm is not able to complete the line search; to check for this, look at the message associated with sol. In such a case the optimizer returns the initial parameters themselves. There may be various reasons for this behaviour. In a nutshell, please check the output of sol and act accordingly.
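For example, a minimal check using the standard fields of SciPy's OptimizeResult (names reuse the question's objective, con and bnds):

sol = minimize(fun=objective, x0=x, constraints=con, bounds=bnds)

# if the solver gave up (e.g. a failed line search), these fields say so
print(sol.success)  # False if the optimizer did not converge
print(sol.status)   # solver-specific status code
print(sol.message)  # human-readable reason for termination
print(sol.x)        # returned parameters (may simply equal x0 on failure)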
Arad,
If you have not yet resolved your issue, try using scipy.optimize.differential_evolution instead of scipy.optimize.minimize. I ran into similar issues, particularly with decision trees because of their step-like behavior resulting in infinite gradients.
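A rough sketch of that approach, reusing the question's objective, const, range_min and range_max (the NonlinearConstraint formulation below is an assumption, expressing const(x) >= 0 component-wise):

from scipy.optimize import differential_evolution, NonlinearConstraint

nlc = NonlinearConstraint(const, 0, np.inf)   # const(x) >= 0 for every output
de_bounds = list(zip(range_min, range_max))   # one (min, max) pair per input variable

result = differential_evolution(objective, de_bounds, constraints=(nlc,), seed=1)
print(result.x, result.fun)

Since differential evolution is gradient-free, the flat, step-like response of a decision tree no longer stalls the search.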
I'm trying to plot the difference between two variables. I'm following the example set here (search for true_p_A and it will be in the right section)
Here is my code
import numpy as np
import pymc as pm

def cool(test):
    n_data_points = len(test)
    alpha = 1.0 / np.mean(test)
    lambda_1 = pm.Exponential("lambda_1", alpha)  # prior on first behaviour
    lambda_2 = pm.Exponential("lambda_2", alpha)  # prior on second behaviour
    tau = pm.DiscreteUniform("tau", lower=0, upper=len(test))  # prior on behaviour change

    """
    The below deterministic functions map an assignment, in this case 0 or 1,
    to a set of parameters, located in the (1,2) arrays `taus` and `centers`.
    """
    @pm.deterministic
    def lambda_(tau=tau, lambda_1=lambda_1, lambda_2=lambda_2):
        out = np.zeros(n_data_points)
        out[:tau] = lambda_1  # lambda before tau is lambda1
        out[tau:] = lambda_2  # lambda after tau is lambda2
        return out

    def delta(p_A=lambda_1, p_B=lambda_2):
        return p_A - p_B

    obs = pm.Poisson("obs", lambda_, value=test, observed=True)

    model = pm.Model([obs, lambda_, lambda_1, lambda_2, tau, delta])
    mcmc = pm.MCMC(model)
    mcmc.sample(5000, 1000, 1)
    return mcmc, 5000, 1
def main_plotter(stats, test):
    mcmc, N, bin = stats
    n_count_data = len(test)
    lambda_1_samples = mcmc.trace('lambda_1')[:]
    lambda_2_samples = mcmc.trace('lambda_2')[:]
    tau_samples = mcmc.trace('tau')[:]
    delta_samples = mcmc.trace('delta')
    print(delta_samples)

data = [1, 2, 1, 2.2, 5, 5.5, 6, 5.4]
main_plotter(cool(data), data)
In the example no variable is created for delta, so no key is inserted. Whenever I run this code it tells me it can't find the key. My question is: what do I need to do to access the delta posterior data?
You are missing the deterministic decorator before the delta function definition. It works if you make the following change, starting at line 21:
@pm.deterministic
def delta(p_A=lambda_1, p_B=lambda_2):
    return p_A - p_B
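With that decorator in place, delta is tracked like the other variables, so its posterior samples can be read back from the trace. A minimal sketch, reusing the names from the question's code:

model = pm.Model([obs, lambda_, lambda_1, lambda_2, tau, delta])
mcmc = pm.MCMC(model)
mcmc.sample(5000, 1000, 1)

delta_samples = mcmc.trace('delta')[:]  # posterior samples of lambda_1 - lambda_2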