Need to plot multiple values over each number of iterations (python help) - matplotlib

I'm trying to plot the multiple values one gets for 'f_12' over a certain number of iterations. It should look like scattered points that oscillate strongly at low iteration counts 'N' and then converge to a rough value of 0.204. I'm getting the correct outputs for 'f_12', but I'm having a really hard time doing the plots. New to Python here.
import time
import random
import numpy as np
import matplotlib.pyplot as plt

start = time.time()
# looking for F_12 via monte carlo method
# Inputs
# N = number of rays to generate
N = 1000
# w = width of plates
w = 1
# h = vertical separation of plates
# L = horizontal offset of plates (L=w=h)
L = 1
h = 1
p_points = 100
# counter for number of rays and number of hits
rays = 0
hits = 0
while rays < N:
    rays = rays + 1
    # random origin of rays along w on surface 1
    Rx = random.uniform(0, 1)
    Rt = random.uniform(0, 1)
    Rph = random.uniform(0, 1)
    x1 = Rx * w
    # polar and azimuth angles - random ray directions
    theta = np.arcsin(np.sqrt(Rt))
    phi = 2*np.pi*Rph
    # theta = np.arcsin(Rt)
    xi = x1 + h*np.tan(theta)*np.cos(phi)
    if xi >= L and xi <= (L+w):
        hit = 1
    else:
        hit = 0
    hits = hits + hit
    gap = N / p_points
    r = rays % gap
    if r == 0:
        F = hits / rays
        plt.figure(figsize=(8, 4))
        plt.plot(N, F, linewidth=2)
        plt.xlabel("N - Rays")
        plt.ylabel("F_12")
        plt.show()
f_12 = hits / N
print(f"F_12 = {f_12} at N = {N} iterations")
# Grab current time after running the code
end = time.time()
# Subtract start time from the end time
total_time = end - start
f_time = round(total_time)
print(f"Running time = {f_time} seconds")


Getting the charge of a single atom, per loop in MD Analysis

I have been trying to use the partial charge of one particular ion in a calculation within MDAnalysis.
I have tried (this is just a snippet from the code that I know is throwing the error):
Cl = u.select_atoms('resname CLA and prop z <= 79.14')
Lz = 79.14  # Determined from system set-up
Q_sum = 0
COM = 38.42979431152344  # Determined from VMD
file_object1 = open(fors, 'a')
print(dcd, file=file_object1)
for ts in u.trajectory[200:]:
    frame = u.trajectory.frame
    time = u.trajectory.time
    for coord in Cl.positions:
        q = Cl.total_charge(Cl.position[coord][2])
        coords = coord - (Lz/COM)
        q_prof = q * (coords + (Lz / 2)) / Lz
        Q_sum = Q_sum + q_prof
print(q)
But I keep getting an error associated with this.
How would I go about selecting this particular atom as it goes through the loop to get its charge in MDAnalysis? Before, I was setting q equal to a constant and the code ran fine, so I know it is only this line that is throwing the error:
q = Cl.total_charge(Cl.position[coord][2])
Thanks for the help!
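For reference, per-atom charges in MDAnalysis live on the atoms themselves rather than behind total_charge(); here is a minimal sketch of reading them (the file names are hypothetical):

import MDAnalysis as mda

u = mda.Universe("topology.psf", "trajectory.dcd")  # hypothetical file names
Cl = u.select_atoms('resname CLA and prop z <= 79.14')

for atom in Cl:
    q = atom.charge        # charge of this single ion
    z = atom.position[2]   # its z coordinate
    # ... use q and z in the profile calculation ...

# or, vectorized over the whole selection:
q_all = Cl.charges          # one charge per selected atom
z_all = Cl.positions[:, 2]  # matching z coordinates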
I figured it out with:
def Q_code(dcd, topo):
    Lz = u.dimensions[2]
    Q_sum = 0
    count = 0
    CLAs = u.select_atoms('segid IONS or segid PROA or segid PROB or segid MEMB')
    ini_frames = -200
    n_frames = len(u.trajectory[ini_frames:])
    for ts in u.trajectory[ini_frames:]:
        count += 1
        membrane = u.select_atoms('segid PROA or segid PROB or segid MEMB')
        COM = membrane.atoms.center_of_mass()[2]
        q_prof = CLAs.atoms.charges * (CLAs.positions[:, 2] + (Lz/2 - COM)) / Lz
        Q_instant = np.sum(q_prof)
        Q_sum += Q_instant
    Q_av = Q_sum / n_frames
    with open('Q_av.txt', 'a') as f:
        print('The Q_av for {} is {}'.format(s, Q_av), file=f)
    return Q_av

Offset rotation matrix

I'm working with two IMUs. I need to offset all frames by the first frame from each sensor. I have created a fictitious scenario where I know the rotations and the wanted result precisely. I need the two sensors to show the same result once their initial (start) orientation is subtracted.
import numpy as np
# Sensor 0, 1 and 2 start orientation in degrees
s0_x = 30
s0_y = 0
s0_z = 0
s1_x = 0
s1_y = 40
s1_z = 0
s2_x = 10
s2_y = 40
s2_z = -10
# Change from start, frame 1
x1 = 20
y1 = 10
z1 = 0
# Change from start, frame 2
x2 = 60
y2 = 30
z2 = 30
GCS = [[1,0,0],[0,1,0],[0,0,1]]
sensor0 = [[s0_x, s0_y, s0_z], [s0_x, s0_y, s0_z], [s0_x, s0_y, s0_z]]
sensor1 = [[s1_x, s1_y, s1_z], [s1_x + x1, s1_y + y1, s1_z + z1], [s1_x + x1 + x2, s1_y + y1 + y2, s1_z + z1 + z2]]
sensor2 = [[s2_x, s2_y, s2_z], [s2_x + x1, s2_y + y1, s2_z + z1], [s2_x + x1 + x2, s2_y + y1 + y2, s2_z + z1 + z2]]
# rotation matrix around the X axis (input in deg)
def Rot_Mat_X(theta):
    r = np.array([[1, 0, 0],
                  [0, np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta))],
                  [0, np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta))]])
    return r
# rotation matrix around the Y axis (input in deg)
def Rot_Mat_Y(theta):
    r = np.array([[np.cos(np.deg2rad(theta)), 0, np.sin(np.deg2rad(theta))],
                  [0, 1, 0],
                  [-np.sin(np.deg2rad(theta)), 0, np.cos(np.deg2rad(theta))]])
    return r
# rotation matrix around the Z axis (input in deg)
def Rot_Mat_Z(theta):
    r = np.array([[np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta)), 0],
                  [np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta)), 0],
                  [0, 0, 1]])
    return r
# Creating the rotation matrices
r_sensor0 = []
r_sensor1 = []
r_sensor2 = []
for i in range(len(sensor1)):
    r_sensor1_z = np.matmul(Rot_Mat_X(sensor1[i][0]), GCS)
    r_sensor1_zy = np.matmul(Rot_Mat_Y(sensor1[i][1]), r_sensor1_z)
    r_R_Upperarm_medial_zyx = np.matmul(Rot_Mat_Z(sensor1[i][2]), r_sensor1_zy)
    r_sensor1.append(r_R_Upperarm_medial_zyx)
    r_sensor2_z = np.matmul(Rot_Mat_X(sensor2[i][0]), GCS)
    r_sensor2_zy = np.matmul(Rot_Mat_Y(sensor2[i][1]), r_sensor2_z)
    r_sensor2_zyx = np.matmul(Rot_Mat_Z(sensor2[i][2]), r_sensor2_zy)
    r_sensor2.append(r_sensor2_zyx)
r_start_sensor1 = r_sensor1[0]
r_start_sensor2 = r_sensor2[0]
r_offset_sensor1 = []
r_offset_sensor2 = []
for i in range(len(sensor0)):
    r_offset_sensor1.append(np.matmul(np.transpose(r_start_sensor1), r_sensor1[i]))
    r_offset_sensor2.append(np.matmul(np.transpose(r_start_sensor2), r_sensor2[i]))
# result:
r_offset_sensor1[0] = [[1,0,0],[0,1,0],[0,0,1]]
r_offset_sensor1[1] = [[0.984,0.059,0.163],[0,0.939,-0.342],[-0.173,0.336,0.925]]
r_offset_sensor1[2] = [[0.748,0.466,0.471],[0.086,0.635,-0.767],[-0.657,0.615,0.434]]
r_offset_sensor2[0] = [[1,0,0],[0,1,0],[0,0,1]]
r_offset_sensor2[1] = [[0.984,0.086,0.150],[-0.03,0.938,-0.344],[-0.171,0.334,0.926]]
r_offset_sensor2[2] = [[0.748,0.541,0.383],[-0.028,0.603,-0.797],[-0.662,0.585,0.466]]
I expect the results of sensors 1 and 2 to be equal for all frames, but they aren't. They should be:
frame[0] = [1,0,0],[0,1,0],[0,0,1]
frame[1] = [0.984,0,0.173],[0.059,0.939,-0.336],[-0.163,0.342,0.9254]
frame[2] = [0.750,-0.433,0.50],[0.625,0.216,-0.750],[0.216,0.875,0.433]
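As a cross-check, the "remove the start orientation" step can also be written with SciPy's Rotation class. This is only an illustrative sketch, not the original code; it assumes the intended relative rotation is R_start^T * R_frame, as in the loop above, and reuses sensor 1's angles from the example:

from scipy.spatial.transform import Rotation as R

# Orientations built from extrinsic x-y-z Euler angles in degrees,
# which matches Rz @ Ry @ Rx as constructed in the code above.
start = R.from_euler('xyz', [0, 40, 0], degrees=True)     # sensor 1, start
frame1 = R.from_euler('xyz', [20, 50, 0], degrees=True)   # sensor 1, frame 1

relative = start.inv() * frame1   # same as r_start.T @ r_frame for matrices
print(relative.as_matrix())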

Sequential sampling from conditional multivariate normal

I'm trying to sequentially sample from a Gaussian Process prior.
The problem is that the samples eventually converge to zero or diverge to infinity.
I'm using the basic conditionals described e.g. here
Note: the kernel(X, X) function returns the squared exponential kernel with isotropic noise.
Here is my code:
n = 32
x_grid = np.linspace(-5, 5, n)
x_all = []
y_all = []
for x in x_grid:
    x_all = [x] + x_all
    X = np.array(x_all).reshape(-1, 1)
    # Mean and covariance of the prior
    mu = np.zeros((X.shape), np.float)
    cov = kernel(X, X)
    if len(mu) == 1:  # first sample is not conditional
        y = np.random.randn()*cov + mu
    else:
        # condition on all previous samples
        u1 = mu[0]
        u2 = mu[1:]
        y2 = np.atleast_2d(np.array(y_all)).T
        C11 = cov[:1, :1]  # dependent sample
        C12 = np.atleast_2d(cov[0, 1:])
        C21 = np.atleast_2d(cov[1:, 0]).T
        C22 = np.atleast_2d(cov[1:, 1:])
        C22_ = la.inv(C22)
        u = u1 + np.dot(C12, np.dot(C22_, (y2 - u2)))
        C22_xC21 = np.dot(C22_, C21)
        C_minus = np.dot(C12, C22_xC21)  # this weirdly becomes larger than C!
        C = C11 - C_minus
        y = u + np.random.randn()*C
    y_all = [y.flatten()[0]] + y_all
Here's an example with 32 samples, where it collapses (plot omitted).
Here's an example with 34 samples, where it explodes (plot omitted).
(For this particular kernel, 34 is the number of samples at or above which the samples start to diverge.)
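For comparison, here is a minimal sketch of the same sequential construction (not the original code; it assumes a squared-exponential kernel with a small diagonal jitter). The draw is scaled by the square root of the conditional variance; in the snippet above, np.random.randn()*C multiplies by the conditional variance itself, which shrinks or inflates the draws whenever C drifts away from 1.

import numpy as np

def sq_exp_kernel(A, B, ell=1.0, sigma_f=1.0, noise=1e-6):
    # Squared-exponential kernel with a small diagonal jitter (assumed form).
    d2 = (A - B.T) ** 2
    K = sigma_f**2 * np.exp(-0.5 * d2 / ell**2)
    if A.shape == B.shape and np.allclose(A, B):
        K += noise * np.eye(len(A))
    return K

rng = np.random.default_rng(0)
x_grid = np.linspace(-5, 5, 32)
xs, ys = [], []

for x in x_grid:
    x_new = np.array([[x]])
    if not xs:
        # first sample is unconditional
        var = sq_exp_kernel(x_new, x_new)[0, 0]
        y = np.sqrt(var) * rng.standard_normal()
    else:
        # condition on all previous samples
        X_prev = np.array(xs).reshape(-1, 1)
        y_prev = np.array(ys).reshape(-1, 1)
        K11 = sq_exp_kernel(x_new, x_new)
        K12 = sq_exp_kernel(x_new, X_prev)
        K22 = sq_exp_kernel(X_prev, X_prev)
        mean = (K12 @ np.linalg.solve(K22, y_prev))[0, 0]
        var = (K11 - K12 @ np.linalg.solve(K22, K12.T))[0, 0]
        y = mean + np.sqrt(max(var, 0.0)) * rng.standard_normal()
    xs.append(x)
    ys.append(y)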

Understanding issue in Scilab for my code

I am doing a projectile motion problem where I need to plot curves of position y versus x for various launch angles, but Scilab shows only one curve. I am confused.
My code is below:
function [H,R,T] = projectile(m,r,h,c,rho,theta,v0,x0,y0,t0)
    g = 9.8
    A = %pi*r^2
    k = c*rho*A/2;
    i = 1
    t(i) = t0
    x(i) = x0
    y(i) = y0
    for j = 0:5
        thetha = theta + j*15;
        vx(i) = v0*cos(thetha*%pi/180);
        vy(i) = v0*sin(thetha*%pi/180);
        while (y(i) >= 0)
            v = sqrt(vx(i)^2 + vy(i)^2);
            t(i+1) = t(i) + h;
            vx(i+1) = vx(i) - h*(k*v*vx(i)/m);
            vy(i+1) = vy(i) - h*(g + k*v*vy(i)/m);
            x(i+1) = x(i) + h*vx(i)
            y(i+1) = y(i) + h*vy(i)
            i = i+1;
        end
        plot(x(i), y(i), '.');
    end
    n = i-1
    R = x(n) - x(1);
    T = t(n);
    H = max(y)
endfunction
You should use vectors to improve the compactness and readability of your code. Here is my proposition of improved (and working) code:
function [H,R,T] = projectile(m,r,h,c,rho,theta0,v0,x0,y0,t0)
    g = 9.81
    A = %pi*r^2
    k = c*rho*A/2;
    for theta = theta0 + (0:15:75)
        v = v0*[cos(theta*%pi/180); sin(theta*%pi/180)];
        t = t0
        xy = [x0;y0]
        i = 1
        while xy(2,i) >= 0
            t(i+1) = t(i) + h;
            v = v + h*([0;-g] - k*norm(v)*v/m);
            xy(:,i+1) = xy(:,i) + h*v;
            i = i+1;
        end
        plot(xy(1,:), xy(2,:));
    end
    R = xy(1,$) - xy(1,1);
    T = t($);
    H = max(xy(2,:))
endfunction

clf
[H,R,T] = projectile(1,0.1,0.001,2,1000,5,1,0,0,0)

Probabilistic Record Linkage in Pandas

I have two dataframes (X & Y). I would like to link them together and to predict the probability that each potential match is correct.
import math
import numpy as np
import pandas as pd

X = pd.DataFrame({'A': ["One", "Two", "Three"]})
Y = pd.DataFrame({'A': ["One", "To", "Free"]})
Method A
I have not yet fully understood the theory but there is an approach presented in:
Sayers, A., Ben-Shlomo, Y., Blom, A.W. and Steele, F., 2015. Probabilistic record linkage. International journal of epidemiology, 45(3), pp.954-964.
Here is my attempt to implement it in Pandas:
# Probability that Matches are True Matches
m = 0.95
# Probability that non-Matches are True non-Matches
u = min(len(X), len(Y)) / (len(X) * len(Y))
# Priors
M_Pr = u
U_Pr = 1 - M_Pr
O_Pr = M_Pr / U_Pr  # Prior odds of a match
# Combine the dataframes
X['key'] = 1
Y['key'] = 1
Z = pd.merge(X, Y, on='key')
Z = Z.drop('key', axis=1)
X = X.drop('key', axis=1)
Y = Y.drop('key', axis=1)
# Levenshtein distance
def Levenshtein_distance(s1, s2):
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    distances = range(len(s1) + 1)
    for i2, c2 in enumerate(s2):
        distances_ = [i2+1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]
L_D = np.vectorize(Levenshtein_distance, otypes=[float])
Z["D"] = L_D(Z['A_x'], Z['A_y'])
# Max string length
def Max_string_length(X, Y):
    return max(len(X), len(Y))
M_L = np.vectorize(Max_string_length, otypes=[float])
Z["L"] = M_L(Z['A_x'], Z['A_y'])
# Agreement weight
def Agreement_weight(D, L):
    return 1 - (D / L)
A_W = np.vectorize(Agreement_weight, otypes=[float])
Z["C"] = A_W(Z['D'], Z['L'])
# Likelihood ratio
def Likelihood_ratio(C):
    return (m/u) - ((m/u) - ((1-m) / (1-u))) * (1-C)
L_R = np.vectorize(Likelihood_ratio, otypes=[float])
Z["G"] = L_R(Z['C'])
# Match weight
def Match_weight(G):
    return math.log(G) * math.log(2)
M_W = np.vectorize(Match_weight, otypes=[float])
Z["R"] = M_W(Z['G'])
# Posterior odds
def Posterior_odds(R):
    return math.exp(R / math.log(2)) * O_Pr
P_O = np.vectorize(Posterior_odds, otypes=[float])
Z["O"] = P_O(Z['R'])
# Probability
def Probability(O):
    return O / (1 + O)
Pro = np.vectorize(Probability, otypes=[float])
Z["P"] = Pro(Z['O'])
I have verified that this gives the same results as in the paper. A sensitivity check on m shows that it doesn't make a lot of difference.
Method B
These assumptions won't apply to all applications, but in some cases each row of X should match a row of Y. In that case:
- the probabilities should sum to 1, and
- if there are many credible candidates to match to, that should reduce the probability of picking the right one.
Then:
X["I"] = X.index
# Combine the dataframes
X['key'] = 1
Y['key'] = 1
Z = pd.merge(X, Y, on='key')
Z = Z.drop('key',axis=1)
X = X.drop('key',axis=1)
Y = Y.drop('key',axis=1)
# Levenshtein distance
def Levenshtein_distance(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
return distances[-1]
L_D = np.vectorize(Levenshtein_distance, otypes=[float])
Z["D"] = L_D(Z['A_x'], Z['A_y'])
# Max string length
def Max_string_length(X, Y):
return max(len(X), len(Y))
M_L = np.vectorize(Max_string_length, otypes=[float])
Z["L"] = M_L(Z['A_x'], Z['A_y'])
# Agreement weight
def Agreement_weight(D, L):
return 1 - ( D / L )
A_W = np.vectorize(Agreement_weight, otypes=[float])
Z["C"] = A_W(Z['D'], Z['L'])
# Normalised Agreement Weight
T = Z .groupby('I') .agg({'C' : sum})
D = pd.DataFrame(T)
D.columns = ['T']
J = Z.set_index('I').join(D)
J['P1'] = J['C'] / J['T']
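The same normalisation can be written more compactly with a groupby transform; this is an equivalent alternative, not part of the original post:

# Divide each agreement weight by the sum of weights for its row of X
Z['P1'] = Z['C'] / Z.groupby('I')['C'].transform('sum')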
Comparing it against Method A:
Method C
This combines method A with method B:
# Normalised Probability
U = Z.groupby('I').agg({'P': sum})
E = pd.DataFrame(U)
E.columns = ['U']
K = Z.set_index('I').join(E)
K['P1'] = J['P1']
K['P2'] = K['P'] / K['U']
We can see that method B (P1) doesn't take account of uncertainty whereas method C (P2) does.