Julia DifferentialEquations.jl: output all variables, not just the states

I have the following example:
using DifferentialEquations

function test1(du,u,p,t)
    a,b,c = p
    d = a^0.1*(t+1)
    e = u[1]/a
    f = u[2]/d
    du[1] = a*u[1]
    du[2] = d*u[2]
    du[3] = b*u[2] - c*u[3]
end
p = (2,0.75,0.8)
u0 = [1.0;1.0;1.0]
tspan = (0.0,3.0)
prob = ODEProblem(test1,u0,tspan,p)
sol = solve(prob,saveat=0.3)
The sol object contains the state outputs, but I also need the other variables ("d", "e", "f"), ideally computed efficiently.
The closest I can get is:
function test2(du,u,p,t)
    global i
    global Out_values
    global sampletimes
    a,b,c = p
    d = a^0.1*(t+1)
    e = u[1]/a
    f = u[2]/d
    if t in sampletimes
        Out_values[1,i] = d
        Out_values[2,i] = e
        Out_values[3,i] = f
        i = i+1
    end
    du[1] = a*u[1]
    du[2] = d*u[2]
    du[3] = b*u[2] - c*u[3]
end
sampletimes = tspan[1]:0.3:tspan[2]
Out_values = Array{Float64}(undef, 3, 2*11)
i=1
prob = ODEProblem(test2,u0,tspan,p)
sol = solve(prob,saveat=0.3,tstops=sampletimes)
However, this solution is not ideal because:
it duplicates saveat, and I get two slightly different sets of outputs (I am not sure why), and
it does not generalize if I decide not to use saveat and want to output values at every solver step, i.e. sol = solve(prob).
Any help is appreciated.
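For reference, since d, e and f are closed-form functions of t and u, one workaround (a sketch I am adding here, not part of the original question) is to recompute them from the saved solution after solving; this works with or without saveat and needs no globals:

a, b, c = p
d_vals = [a^0.1 * (t + 1) for t in sol.t]                      # d at every saved time
e_vals = [u[1] / a for u in sol.u]                             # e from the saved states
f_vals = [sol.u[k][2] / d_vals[k] for k in eachindex(sol.t)]   # f pairs u[2] with d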

Related

Force single line of string in VObject

I am trying to create vCards (email contacts) using the vobject library and pandas for Python.
When serializing the values, I get new lines in the "notes" of my output (there are no new lines in the source). Each new line created by .serialize() also starts with a space. I need to get rid of both.
Example of output:
BEGIN:VCARD
VERSION:3.0
EMAIL;TYPE=INTERNET:test@example.at
FN:Valentina test
N:Valentina;test;;;
NOTE:PelletiererIn Mitglieder 2 Preiserhebung Aussendung 2 Pressespiegelver
sand 2 GeschäftsführerIn PPA_PelletiererInnen GeschäftsführerIn_Pellet
iererIn
ORG:Test Company
TEL;TYPE=CELL:
TEL;TYPE=CELL:
TEL;TYPE=CELL:
END:VCARD
Is there a way that I can force the output in a single line?
output = ""
for _,row in df.iterrows():
j = vobject.vCard()
j.add('n')
j.n.value = vobject.vcard.Name(row["First Name"],row["Last Name"])
j.add('fn')
j.fn.value = (str(row["First Name"]) + " " + row["Last Name"])
o = j.add("email")
o.value = str((row["E-mail Address"]))
o.type_param = "INTERNET"
#o = j.add("email")
#o.value = str((row["E-mail 2 Address"]))
#o.type_param = "INTERNET"
j.add('org')
j.org.value = [row["Organization"]]
k = j.add("tel")
k.value = str(row["Home Phone"])
k.type_param = "CELL"
k = j.add("tel")
k.value = str(row["Business Phone"])
k.type_param = "CELL"
k = j.add("tel")
k.value = str(row["Mobile Phone"])
k.type_param = "CELL"
j.add("note")
j.note.value = row["Notiz für Kontaktexport"]
output += j.serialize()
print(output)
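For what it's worth, those breaks come from vCard line folding: RFC 2425/2426 wraps content lines at 75 octets with a CRLF followed by a single space. One option, sketched here as a post-processing step (with the caveat that the result is no longer strictly RFC-compliant for long lines), is to unfold after serializing:

# A CRLF (or bare LF) followed by one space marks a folded
# continuation; removing those sequences restores one physical
# line per property.
unfolded = output.replace("\r\n ", "").replace("\n ", "")
print(unfolded)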

Getting the charge of a single atom, per loop in MD Analysis

I have been trying to use the partial charge of one particular ion in a calculation within MDAnalysis.
I have tried the following (this is just a snippet from the code that I know is throwing the error):
Cl = u.select_atoms('resname CLA and prop z <= 79.14')
Lz = 79.14  # determined from system set-up
Q_sum = 0
COM = 38.42979431152344  # determined from VMD
file_object1 = open(fors, 'a')
print(dcd, file=file_object1)
for ts in u.trajectory[200:]:
    frame = u.trajectory.frame
    time = u.trajectory.time
    for coord in Cl.positions:
        q = Cl.total_charge(Cl.position[coord][2])
        coords = coord - (Lz/COM)
        q_prof = q * (coords + (Lz / 2)) / Lz
        Q_sum = Q_sum + q_prof
        print(q)
But I keep getting an error associated with this.
How would I go about selecting this particular atom as it goes through the loop to get its charge in MDAnalysis? When I set q to a constant the code ran fine, so I know it is only this line that is throwing the error:
q = Cl.total_charge(Cl.position[coord][2])
Thanks for the help!
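For context, MDAnalysis exposes per-atom partial charges on an AtomGroup as the .charges NumPy array (provided the topology format carries charge information, e.g. a PSF), which pairs element-wise with .positions. A minimal sketch of that idea, using the corrected formula from the solution below:

import numpy as np

# One charge and one z coordinate per atom; no total_charge() call needed.
q_prof = Cl.charges * (Cl.positions[:, 2] + (Lz / 2 - COM)) / Lz
Q_sum += np.sum(q_prof)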
I figured it out with:
def Q_code(dcd, topo):
    Lz = u.dimensions[2]
    Q_sum = 0
    count = 0
    CLAs = u.select_atoms('segid IONS or segid PROA or segid PROB or segid MEMB')
    ini_frames = -200
    n_frames = len(u.trajectory[ini_frames:])
    for ts in u.trajectory[ini_frames:]:
        count += 1
        membrane = u.select_atoms('segid PROA or segid PROB or segid MEMB')
        COM = membrane.atoms.center_of_mass()[2]
        q_prof = CLAs.atoms.charges * (CLAs.positions[:, 2] + (Lz/2 - COM)) / Lz
        Q_instant = np.sum(q_prof)
        Q_sum += Q_instant
    Q_av = Q_sum / n_frames
    with open('Q_av.txt', 'a') as f:
        print('The Q_av for {} is {}'.format(s, Q_av), file=f)
    return Q_av

Can't get array dimensions equal for plotting with Matplotlib

I am trying to plot arrays where one is calculated from my x-axis inside a for loop. I've gone through my code multiple times and checked the array lengths along the way, but I can't find a change that makes them equal length.
This is the code I have started with:
import numpy as np
import matplotlib.pyplot as plt

a = 1; b = 2; c = 3; d = 1; e = 2
t0 = 0
t_end = 10
dt = 0.05

t = np.arange(t0, t_end, dt)
n = len(t)
fout = 1
M = 1
Ca = np.zeros(n)
Ca[0] = a; Cb[0] = b
Cc[0] = 0
k1 = 1

def rA(Ca, Cb, Cc, t):
    return -k1 * Ca**a * Cb**b * dt

while e > 1e-3:
    t = np.arange(t0, t_end, dt)
    n = len(t)
    for i in range(1, n-1):
        Ca[i+1] = Ca[i] + rA(Ca[i], Cb[i], Cc[i], t[i])
    e = abs((M - Ca[n-1]) / M)
    M = Ca[n-1]
    dt = dt / 2

plt.plot(t, Ca)
plt.grid()
plt.show()
Afterwards, I try to calculate a second function for different y-values. Within the for loop I added:
Cb[i+1] = Cb[i] + rB(Ca[i], Cb[i], Cc[i], t[i])
While also defining rB in a similar manner to rA. The error I received at this point is:
IndexError: index 200 is out of bounds for axis 0 with size 200
I feel like it has to do with the way I'm initializing the arrays for Ca. In MATLAB, which I'm more familiar with, the equivalent looks like this:
Ca = zeros(1,n)
I have recreated this code in MATLAB and there I do get a plot, so I'm wondering where I am going wrong here.
So I thought my best course of action was to change n to an int by fixing it in the while loop,
but after changing n = len(t) to n = 100 I received the following error message:
ValueError: x and y must have same first dimension, but have shapes (200,) and (400,)
As my previous question was something trivial I kept missing, I feel like this is the same. But I have spent over an hour looking and trying fixes without success.
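A likely cause, sketched below on the assumption that Cb and Cc are initialized like Ca: the while loop halves dt and rebuilds t, so n doubles, but the concentration arrays keep their old length and Ca[i+1] eventually indexes past the end. Re-allocating the arrays whenever dt changes keeps x and y the same length:

# Sketch: rebuild the state arrays each pass so they match len(t).
while e > 1e-3:
    t = np.arange(t0, t_end, dt)
    n = len(t)
    Ca = np.zeros(n); Ca[0] = a
    Cb = np.zeros(n); Cb[0] = b
    Cc = np.zeros(n)
    for i in range(n - 1):
        Ca[i+1] = Ca[i] + rA(Ca[i], Cb[i], Cc[i], t[i])
    e = abs((M - Ca[n-1]) / M)
    M = Ca[n-1]
    dt = dt / 2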

Is there a way to get max using pandas DataFrame.eval instead of DataFrame.max?

Is there a way to get the maximum per row using eval?
It would be very sweet to be able to write something like:
X = pd.DataFrame({'A':[1,2,3,4]})
X.eval("""B = 2* A
C = A +B
D = max(A, B)
E = 2 * D
""", inplace = True)
Instead of:
X = pd.DataFrame({'A':[1,2,3,4]})
X.eval("""B = 2* A
C = A +B
""", inplace = True)
X['D'] = X.loc[:,['A', 'B']].max(axis=1)
X.eval('E = 2 * D', inplace=True)
EDIT:
As suggested by @mephisto, something similar to this works beautifully:
def rowmax(A, B):
    return pd.concat([A, B], axis=1).max(axis=1)

X = pd.DataFrame({'A': [0, 1, 2, 3, 4]})
X.eval("""B = A % 2 + 1
D = @rowmax(A, B)
""", inplace=True)
I am interested in knowing other alternatives.
You should be able to call a custom or predefined function with @. In your case you want to call df.max(), so try X.eval('@df.max()').
Hope this helps
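Another alternative outside eval (my addition, not from the thread): NumPy's element-wise maximum gives the row-wise max of two columns directly, and composes with eval steps before and after it:

import numpy as np

# pandas' eval parser has no built-in max, but this one-liner does
# the same row-wise reduction over two columns.
X['D'] = np.maximum(X['A'], X['B'])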

How to speed up a simple linear algebra optimization problem in Julia?

I implemented the LSDD changepoint detection method described in [1] in Julia, to see if I could make it faster than the existing Python implementation [2], which is based on a grid search over the parameters.
I obtain the desired results, but despite my best efforts my grid-search version takes about as long to compute as the Python one, which is still far too long for real applications.
I also tried using the Optim package, which only makes things worse (2 or 3 times slower).
Here is the grid search that I implemented:
using Random
using LinearAlgebra

function squared_distance(X::Array{Float64,1}, C::Array{Float64,1})
    sqd = zeros(length(X), length(C))
    for i in 1:length(X)
        for j in 1:length(C)
            sqd[i,j] = X[i]^2 + C[j]^2 - 2*X[i]*C[j]
        end
    end
    return sqd
end
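# Aside (my addition, not from the original post): the same matrix can be
# built with one fused broadcast, since (X[i]-C[j])^2 expands to exactly
# the expression above; this is usually at least as fast and much shorter.
squared_distance_bc(X, C) = (X .- C') .^ 2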
function lsdd(x::Array{Float64,1}, y::Array{Float64,1}; folds = 5, sigma_list = nothing, lambda_list = nothing)
    lx, ly = length(x), length(y)
    b = min(lx+ly, 300)
    C = shuffle(vcat(x,y))[1:b]
    CC_dist2 = squared_distance(C,C)
    xC_dist2, yC_dist2 = squared_distance(x,C), squared_distance(y,C)
    Tx, Ty = length(x) - div(lx,folds), length(y) - div(ly,folds)
    #Define the training and testing data sets
    cv_split1, cv_split2 = floor.(collect(1:lx)*folds/lx), floor.(collect(1:ly)*folds/ly)
    cv_index1, cv_index2 = shuffle(cv_split1), shuffle(cv_split2)
    tr_idx1, tr_idx2 = [findall(x->x!=i,cv_index1) for i in 1:folds], [findall(x->x!=i,cv_index2) for i in 1:folds]
    te_idx1, te_idx2 = [findall(x->x==i,cv_index1) for i in 1:folds], [findall(x->x==i,cv_index2) for i in 1:folds]
    xTr_dist, yTr_dist = [xC_dist2[i,:] for i in tr_idx1], [yC_dist2[i,:] for i in tr_idx2]
    xTe_dist, yTe_dist = [xC_dist2[i,:] for i in te_idx1], [yC_dist2[i,:] for i in te_idx2]
    if sigma_list == nothing
        sigma_list = [0.25, 0.5, 0.75, 1, 1.2, 1.5, 2, 2.5, 2.2, 3, 5]
    end
    if lambda_list == nothing
        lambda_list = [1.00000000e-03, 3.16227766e-03, 1.00000000e-02, 3.16227766e-02,
                       1.00000000e-01, 3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
                       1.00000000e+01]
    end
    #memory preallocation
    score_cv = zeros(length(sigma_list), length(lambda_list))
    H = zeros(b,b)
    hx_tr, hy_tr = [zeros(b,1) for i in 1:folds], [zeros(b,1) for i in 1:folds]
    hx_te, hy_te = [zeros(1,b) for i in 1:folds], [zeros(1,b) for i in 1:folds]
    #h_tr, h_te = zeros(b,1), zeros(1,b)
    theta = zeros(b)
    for (sigma_idx, sigma) in enumerate(sigma_list)
        #the expression of H is different in higher dimensions
        #H = sqrt((sigma^2)*pi)*exp.(-CC_dist2/(4*sigma^2))
        set_H(H, CC_dist2, sigma, b)
        #check that the sum is performed along the right dimension
        set_htr(hx_tr, xTr_dist, sigma, Tx), set_htr(hy_tr, yTr_dist, sigma, Ty)
        set_hte(hx_te, xTe_dist, sigma, lx-Tx), set_hte(hy_te, yTe_dist, sigma, ly-Ty)
        for i in 1:folds
            h_tr = hx_tr[i] - hy_tr[i]
            h_te = hx_te[i] - hy_te[i]
            #set_h(h_tr, hx_tr[i], hy_tr[i], b)
            #set_h(h_te, hx_te[i], hy_te[i], b)
            for (lambda_idx, lambda) in enumerate(lambda_list)
                set_theta(theta, H, lambda, h_tr, b)
                score_cv[sigma_idx, lambda_idx] += dot(theta, H*theta) - 2*dot(theta, h_te)
            end
        end
    end
    #retrieve the optimal parameters (row index = sigma, column index = lambda)
    sigma_chosen = sigma_list[findmin(score_cv)[2][1]]
    lambda_chosen = lambda_list[findmin(score_cv)[2][2]]
    #calculating the new "optimal" solution
    H = sqrt((sigma_chosen^2)*pi)*exp.(-CC_dist2/(4*sigma_chosen^2))
    H_lambda = H + lambda_chosen*Matrix{Float64}(I, b, b)
    h = (1/lx)*sum(exp.(-xC_dist2/(2*sigma_chosen^2)), dims = 1) - (1/ly)*sum(exp.(-yC_dist2/(2*sigma_chosen^2)), dims = 1)
    theta_final = H_lambda\transpose(h)
    f = transpose(theta_final).*sum(exp.(-vcat(xC_dist2,yC_dist2)/(2*sigma_chosen^2)), dims = 1)
    L2 = 2*dot(theta_final,h) - dot(theta_final,H*theta_final)
    return L2
end
function set_H(H::Array{Float64,2}, dist::Array{Float64,2}, sigma::Float64, b::Int)
    for i in 1:b
        for j in 1:b
            H[i,j] = sqrt((sigma^2)*pi)*exp(-dist[i,j]/(4*sigma^2))
        end
    end
end
function set_theta(theta::Array{Float64,1}, H::Array{Float64,2}, lambda::Float64, h::Array{Float64,2}, b::Int)
    Hl = H + lambda*Matrix{Float64}(I, b, b)
    hl = copy(h)        #posv! overwrites its right-hand side, and h is reused across the lambda loop
    LAPACK.posv!('L', Hl, hl)
    copyto!(theta, hl)  #write the solution into the caller's theta; `theta = hl` would only rebind the local name
end
function set_htr(h::Vector{Matrix{Float64}}, dists::Vector{Matrix{Float64}}, sigma::Float64, T::Int)
    for (CVidx, dist) in enumerate(dists)
        for (idx, value) in enumerate((1/T)*sum(exp.(-dist/(2*sigma^2)), dims = 1))
            h[CVidx][idx] = value
        end
    end
end
function set_hte(h::Vector{Matrix{Float64}}, dists::Vector{Matrix{Float64}}, sigma::Float64, T::Int)
    for (CVidx, dist) in enumerate(dists)
        for (idx, value) in enumerate((1/T)*sum(exp.(-dist/(2*sigma^2)), dims = 1))
            h[CVidx][idx] = value
        end
    end
end
function set_h(h, h1, h2, b)
    for i in 1:b
        h[i] = h1[i] - h2[i]
    end
end
The set_H, set_h and set_theta functions are there because I read somewhere that modifying preallocated memory in place with a function was faster, but it did not make a big difference.
To test it, I use two random distributions as input data:
x, y = rand(500), 1.5*rand(500)
lsdd(x,y) # returns a value around 0.3
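To time it reproducibly (my addition; assumes the BenchmarkTools.jl package is installed), interpolate the globals with $ so they do not distort the measurement:

using BenchmarkTools
@btime lsdd($x, $y)   # $ interpolates globals into the benchmark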
Now here is the version of the code where I try to use Optim:
using Optim   # optimize, BFGS and Optim.Options below come from Optim.jl

function Theta(sigma::Float64, lambda::Float64, x::Array{Float64,1}, y::Array{Float64,1}, folds::Int)
    lx, ly = length(x), length(y)
    b = min(lx+ly, 300)
    C = shuffle(vcat(x,y))[1:b]
    CC_dist2 = squared_distance(C,C)
    xC_dist2, yC_dist2 = squared_distance(x,C), squared_distance(y,C)
    #the subsets are not mutually exclusive!
    Tx, Ty = length(x) - div(lx,folds), length(y) - div(ly,folds)
    shuffled_x, shuffled_y = [shuffle(1:lx) for i in 1:folds], [shuffle(1:ly) for i in 1:folds]
    cv_index1, cv_index2 = floor.(collect(1:lx)*folds/lx)[shuffle(1:lx)], floor.(collect(1:ly)*folds/ly)[shuffle(1:ly)]
    tr_idx1, tr_idx2 = [i[1:Tx] for i in shuffled_x], [i[1:Ty] for i in shuffled_y]
    te_idx1, te_idx2 = [i[Tx:end] for i in shuffled_x], [i[Ty:end] for i in shuffled_y]
    xTr_dist, yTr_dist = [xC_dist2[i,:] for i in tr_idx1], [yC_dist2[i,:] for i in tr_idx2]
    xTe_dist, yTe_dist = [xC_dist2[i,:] for i in te_idx1], [yC_dist2[i,:] for i in te_idx2]
    score_cv = 0
    Id = Matrix{Float64}(I, b, b)
    H = sqrt((sigma^2)*pi)*exp.(-CC_dist2/(4*sigma^2))
    hx_tr, hy_tr = [transpose((1/Tx)*sum(exp.(-dist/(2*sigma^2)), dims = 1)) for dist in xTr_dist], [transpose((1/Ty)*sum(exp.(-dist/(2*sigma^2)), dims = 1)) for dist in yTr_dist]
    hx_te, hy_te = [(1/(lx-Tx))*sum(exp.(-dist/(2*sigma^2)), dims = 1) for dist in xTe_dist], [(1/(ly-Ty))*sum(exp.(-dist/(2*sigma^2)), dims = 1) for dist in yTe_dist]   #1/(lx-Tx) normalization, matching the grid-search version
    for i in 1:folds
        h_tr, h_te = hx_tr[i] - hy_tr[i], hx_te[i] - hy_te[i]
        #theta = (H + lambda * Id)\h_tr
        theta = copy(h_tr)
        Hl = H + lambda*Matrix{Float64}(I, b, b)
        LAPACK.posv!('L', Hl, theta)
        score_cv += dot(theta, H*theta) - 2*dot(theta, h_te)
    end
    return score_cv, (CC_dist2, xC_dist2, yC_dist2)
end
function cost(params::Array{Float64,1}, x::Array{Float64,1}, y::Array{Float64,1}, folds::Int)
    s, l = params[1], params[2]
    return Theta(s,l,x,y,folds)[1]
end
"""
Performs the optinization
"""
function lsdd3(x::Array{Float64,1}, y::Array{Float64,1}; folds = 4)
    start = [1.0, 0.1]
    b = min(length(x)+length(y), 300)
    lx, ly = length(x), length(y)
    #result = optimize(params -> cost(params,x,y,folds), fill(0.0,2), fill(50.0,2), start, Fminbox(LBFGS(linesearch=LineSearches.BackTracking())); autodiff = :forward)
    result = optimize(params -> cost(params,x,y,folds), start, BFGS(), Optim.Options(f_calls_limit = 5, iterations = 5))
    #bboptimize(rosenbrock2d; SearchRange = [(-5.0, 5.0), (-2.0, 2.0)])
    #result = optimize(cost, [0,0], [Inf,Inf], start, Fminbox(AcceleratedGradientDescent()))
    sigma_chosen, lambda_chosen = Optim.minimizer(result)
    CC_dist2, xC_dist2, yC_dist2 = Theta(sigma_chosen, lambda_chosen, x, y, folds)[2]
    H = sqrt((sigma_chosen^2)*pi)*exp.(-CC_dist2/(4*sigma_chosen^2))
    h = (1/lx)*sum(exp.(-xC_dist2/(2*sigma_chosen^2)), dims = 1) - (1/ly)*sum(exp.(-yC_dist2/(2*sigma_chosen^2)), dims = 1)
    theta_final = (H + lambda_chosen*Matrix{Float64}(I, b, b))\transpose(h)
    f = transpose(theta_final).*sum(exp.(-vcat(xC_dist2,yC_dist2)/(2*sigma_chosen^2)), dims = 1)
    L2 = 2*dot(theta_final,h) - dot(theta_final,H*theta_final)
    return L2
end
No matter which options I use in the optimizer, I always end up with something too slow. Maybe the grid search is the best option, but I don't know how to make it faster. Does anyone have an idea how I could proceed?
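One algebraic shortcut worth trying (my suggestion, not from the post): H is symmetric, so an eigendecomposition H = Q*Diagonal(vals)*Q' computed once per sigma turns every (H + lambda*I) solve into cheap diagonal work, since (H + lambda*I)^-1 * h = Q * ((Q'h) ./ (vals .+ lambda)). A sketch of the inner loops under that assumption:

using LinearAlgebra

F = eigen(Symmetric(H))        # factor H once per sigma
Qh = F.vectors' * vec(h_tr)    # rotate the RHS into the eigenbasis, once per fold
for (lambda_idx, lambda) in enumerate(lambda_list)
    theta = F.vectors * (Qh ./ (F.values .+ lambda))   # == (H + lambda*I) \ h_tr
    score_cv[sigma_idx, lambda_idx] += dot(theta, H*theta) - 2*dot(theta, vec(h_te))
end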
[1] : http://www.mcduplessis.com/wp-content/uploads/2016/05/Journal-IEICE-2014-CLSDD-1.pdf
[2] : http://www.ms.k.u-tokyo.ac.jp/software.html