I'm working with two IMUs. I need to offset all frames by the first frame from each sensor. I have created a fictional scenario in which I know the rotations and the desired result exactly. I need the two sensors to show the same result once their initial (start) orientation is subtracted.
import numpy as np
# Sensor 0,1 and 2 start orientation in degrees
s0_x = 30
s0_y = 0
s0_z = 0
s1_x = 0
s1_y = 40
s1_z = 0
s2_x = 10
s2_y = 40
s2_z= -10
# Change from start frame 1
x1 = 20
y1 = 10
z1 = 0
# Change from start frame 2
x2 = 60
y2 = 30
z2 = 30
GCS= [[1,0,0],[0,1,0],[0,0,1]]
sensor0 = [[s0_x, s0_y, s0_z], [s0_x, s0_y, s0_z], [s0_x, s0_y, s0_z]]
sensor1 = [[s1_x, s1_y, s1_z], [s1_x + x1, s1_y + y1, s1_z + z1],[s1_x + x1 + x2, s1_y + y1+ y2, s1_z + z1+ z2]]
sensor2 = [[s2_x, s2_y, s2_z], [s2_x + x1, s2_y + y1, s2_z + z1], [s2_x + x1+ x2, s2_y + y1+ y2, s2_z + z1+ z2]]
# Rotation matrix around the X axis (input in deg)
def Rot_Mat_X(theta):
    r = np.array([[1, 0, 0],
                  [0, np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta))],
                  [0, np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta))]])
    return r
# Rotation matrix around the Y axis (input in deg)
def Rot_Mat_Y(theta):
    r = np.array([[np.cos(np.deg2rad(theta)), 0, np.sin(np.deg2rad(theta))],
                  [0, 1, 0],
                  [-np.sin(np.deg2rad(theta)), 0, np.cos(np.deg2rad(theta))]])
    return r
# Rotation matrix around the Z axis (input in deg)
def Rot_Mat_Z(theta):
    r = np.array([[np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta)), 0],
                  [np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta)), 0],
                  [0, 0, 1]])
    return r
# Creating the rotation matrices
r_sensor0 = []
r_sensor1= []
r_sensor2= []
for i in range(len(sensor1)):
    r_sensor1_z = np.matmul(Rot_Mat_X(sensor1[i][0]), GCS)
    r_sensor1_zy = np.matmul(Rot_Mat_Y(sensor1[i][1]), r_sensor1_z)
    r_sensor1_zyx = np.matmul(Rot_Mat_Z(sensor1[i][2]), r_sensor1_zy)
    r_sensor1.append(r_sensor1_zyx)
    r_sensor2_z = np.matmul(Rot_Mat_X(sensor2[i][0]), GCS)
    r_sensor2_zy = np.matmul(Rot_Mat_Y(sensor2[i][1]), r_sensor2_z)
    r_sensor2_zyx = np.matmul(Rot_Mat_Z(sensor2[i][2]), r_sensor2_zy)
    r_sensor2.append(r_sensor2_zyx)
r_start_sensor1 = r_sensor1[0]
r_start_sensor2 = r_sensor2[0]
r_offset_sensor1 = []
r_offset_sensor2 = []
for i in range(len(sensor0)):
    r_offset_sensor1.append(np.matmul(np.transpose(r_start_sensor1), r_sensor1[i]))
    r_offset_sensor2.append(np.matmul(np.transpose(r_start_sensor2), r_sensor2[i]))
# result:
r_offset_sensor1[0] = [[1,0,0],[0,1,0],[0,0,1]]
r_offset_sensor1[1] = [[0.984,0.059,0.163],[0,0.939,-0.342],[-0.173,0.336,0.925]]
r_offset_sensor1[2] = [[0.748,0.466,0.471],[0.086,0.635,-0.767],[-0.657,0.615,0.434]]
r_offset_sensor2[0] = [[1,0,0],[0,1,0],[0,0,1]]
r_offset_sensor2[1] = [[0.984,0.086,0.150],[-0.03,0.938,-0.344],[-0.171,0.334,0.926]]
r_offset_sensor2[2] = [[0.748,0.541,0.383],[-0.028,0.603,-0.797],[-0.662,0.585,0.466]]
I expect the results for sensors 1 and 2 to be equal in every frame, but they aren't. They should be:
frame[0] = [1,0,0],[0,1,0],[0,0,1]
frame[1] = [0.984,0,0.173],[0.059,0.939,-0.336],[-0.163,0.342,0.9254]
frame[2] = [0.750,-0.433,0.50],[0.625,0.216,-0.750],[0.216,0.875,0.433]
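One way to sanity-check the hand-rolled matrices is to rebuild the orientations with SciPy and compare the offset for a single frame. A minimal sketch, assuming SciPy is available (lowercase 'xyz' in from_euler denotes extrinsic rotations, which should match the Rot_Mat_Z @ Rot_Mat_Y @ Rot_Mat_X composition above):
import numpy as np
from scipy.spatial.transform import Rotation as R

def euler_to_matrix(x_deg, y_deg, z_deg):
    # Extrinsic rotations about the fixed x, then y, then z axes,
    # i.e. the same composition as Rot_Mat_Z @ Rot_Mat_Y @ Rot_Mat_X above.
    return R.from_euler('xyz', [x_deg, y_deg, z_deg], degrees=True).as_matrix()

r1_start = euler_to_matrix(0, 40, 0)              # sensor 1 start orientation
r1_frame1 = euler_to_matrix(0 + 20, 40 + 10, 0)   # sensor 1, frame 1
# Relative rotation of frame 1 expressed in the start frame of sensor 1
r1_offset = r1_start.T @ r1_frame1
print(np.round(r1_offset, 3))
Since this is the same computation as the loop above, it should reproduce r_offset_sensor1[1]; repeating it with sensor 2's start and frame-1 angles gives r_offset_sensor2[1].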
MLESAC is better than RANSAC because it evaluates the likelihood of a hypothesis rather than counting the number of inliers (Torr and Zisserman 2000).
So there should be no reason to use RANSAC if MLESAC is available. But when I applied it to the plane-fitting problem, I got a worse result than with RANSAC. The p_i values came out nearly the same for every datum when I substituted the distance errors into equation 19, which leads to a wrong negative log-likelihood.
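For reference, the mixture that equation 19 of Torr and Zisserman (2000) evaluates for each residual $e_i$ is

$$p(e_i) = \gamma \,\frac{1}{\sqrt{2\pi}\,\sigma}\exp\!\left(-\frac{e_i^{2}}{2\sigma^{2}}\right) + (1-\gamma)\,\frac{1}{\nu}, \qquad -\mathcal{L} = -\sum_i \log p(e_i),$$

where $\gamma$ is the inlier fraction estimated by EM, $\sigma$ the inlier noise level, and $\nu$ the size of the window over which outliers are spread (approximated in the code below by the bounding-box diagonal v).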
%% MLESAC (REF.PCL)
% data
clc;clear; close all;
f = @(a_hat,b_hat,c_hat,x,y) a_hat.*x + b_hat.*y + c_hat; % z
a = 1;
b = 1;
c = 20;
width = 10;
range = (-width:0.01:width)'; % different from linespace
x = -width+(width-(-width))*rand(length(range),1); % r = a + (b-a).*rand(N,1)
y = -width+(width-(-width))*rand(length(range),1);
X = (-width:0.5:width)';
Y = (-width:0.5:width)';
[X,Y] = meshgrid(X,Y); % for drawing surf
Z = f(a/c,b/c,c/c,X,Y);
z_n = f(a/c,b/c,c/c,x,y); % z/c
% add noise
r = 0.3;
noise = r*randn(size(x));
z_n = z_n + noise;
% add outliers
out_rng = find(y>=8,200);
out_udel = 5;
z_n(out_rng) = z_n(out_rng) + out_udel;
plot3(x,y,z_n,'b.');hold on;
surf(X,Y,Z);hold on;grid on ;axis equal;
p_n = [x y z_n];
num_pt = size(p_n,1);
% compute sigma = median(dist (x - median (x)))
threshold = 0.3; %%%%%%%%% user-defined
medianx = median(p_n(:,1));
mediany = median(p_n(:,2));
medianz = median(p_n(:,3));
medianp = [medianx mediany medianz];
mediadist = median(sqrt(sum((p_n - medianp).*(p_n - medianp),2)));
sigma = mediadist * threshold;
% compute the bounding box diagonal
maxx = max(p_n(:,1));
maxy = max(p_n(:,2));
maxz = max(p_n(:,3));
minx = min(p_n(:,1));
miny = min(p_n(:,2));
minz = min(p_n(:,3));
bound = [maxx maxy maxz]-[minx miny minz];
v = sqrt(sum(bound.*bound,2));
%% iteration
iteration = 0;
num_inlier = 0;
max_iteration = 10000;
max_num_inlier = 0;
k = 1;
s = 5; % number of sample point
probability = 0.99;
d_best_penalty = 100000;
dist_scaling_factor = -1 / (2.0*sigma*sigma);
normalization_factor = 1 / (sqrt(2*pi)*sigma);
Gaussian = @(gamma,disterr,sig) gamma * normalization_factor * exp(disterr.^2 * dist_scaling_factor);
Uniform = @(gamma,v) (1-gamma)/v;
while(iteration < k)
    % get sample
    rand_var = randi([1 length(x)],s,1);
    % find coeff. & inlier
    A_rand = [p_n(rand_var,1:2) ones(size(rand_var,1),1)];
    y_est = p_n(rand_var,3);
    Xopt = pinv(A_rand)*y_est;
    disterr = abs(sum([p_n(:,1:2) ones(size(p_n,1),1)].*Xopt',2) - p_n(:,3))./sqrt(dot(Xopt',Xopt'));
    inlier = find(disterr <= threshold);
    outlier = find(disterr >= threshold);
    num_inlier = size(inlier,1);
    outlier_num = size(outlier,1);
    % EM
    gamma = 0.5;
    iterations_EM = 3;
    for i = 1:iterations_EM
        % Likelihood of a datum given that it is an inlier
        p_i = Gaussian(gamma,disterr,sigma);
        % Likelihood of a datum given that it is an outlier
        p_o = Uniform(gamma,v);
        zi = p_i./(p_i + p_o);
        gamma = sum(zi)/num_pt;
    end
    % Find the negative log likelihood of the model: -L
    d_cur_penalty = -sum(log(p_i+p_o));
    if(d_cur_penalty < d_best_penalty)
        d_best_penalty = d_cur_penalty;
        % record inlier
        best_inlier = p_n(inlier,:);
        max_num_inlier = num_inlier;
        best_model = Xopt;
        % Adapt k
        w = max_num_inlier / num_pt;
        p_no_outliers = 1 - w^s;
        k = log(1-probability)/log(p_no_outliers);
    end
    % RANSAC
    % if (num_inlier > max_num_inlier)
    %     max_num_inlier = num_inlier;
    %     best_model = Xopt;
    %
    %     % Adapt k
    %     w = max_num_inlier / num_pt;
    %     p_no_outliers = 1 - w^s;
    %     k = log(1-probability)/log(p_no_outliers);
    % end
    iteration = iteration + 1;
    if iteration > max_iteration
        break;
    end
end
a_est = best_model(1,:);
b_est = best_model(2,:);
c_est = best_model(3,:);
Z_opt = f(a_est,b_est,c_est,X,Y);
new_sur = mesh(X,Y,Z_opt,'edgecolor', 'r','FaceAlpha',0.5); % estimate
title('MLESAC',sprintf('original: a/c = %.2f, b/c = %.2f, c/c = %.2f\n new: a/c = %.2f, b/c = %.2f, c/c = %.2f',a/c,b/c,c/c,a_est,b_est,c_est));
My code is based on the PCL implementation of MLESAC, and I wrote it in MATLAB.
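For comparison, here is a minimal sketch of the same EM step and negative log-likelihood written in Python (only an illustration of the cited algorithm, not a drop-in replacement; disterr, sigma and v correspond to the MATLAB variables above):
import numpy as np

def mlesac_negative_log_likelihood(disterr, sigma, v, em_iters=3):
    gamma = 0.5
    norm = 1.0 / (np.sqrt(2 * np.pi) * sigma)
    for _ in range(em_iters):
        p_in = gamma * norm * np.exp(-disterr**2 / (2 * sigma**2))  # inlier term
        p_out = (1 - gamma) / v                                      # outlier term
        z = p_in / (p_in + p_out)        # posterior inlier responsibility
        gamma = z.mean()                 # M-step: update the mixing coefficient
    # re-evaluate the mixture with the final gamma before scoring
    p_in = gamma * norm * np.exp(-disterr**2 / (2 * sigma**2))
    p_out = (1 - gamma) / v
    return -np.sum(np.log(p_in + p_out))
Note that the mixture is re-evaluated with the final gamma before the score is computed.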
I'm trying to plot the successive values one gets for 'f_12' over a certain number of iterations. The plot should show points that oscillate strongly when the number of iterations 'N' is low and then converge to a rough value of 0.204. I'm getting the correct outputs for 'f_12', but I'm having a really hard time producing the plot. New to Python here.
import time
import random
import numpy as np
import matplotlib.pyplot as plt

start = time.time()
# looking for F_12 via the Monte Carlo method
# Inputs
# N = number of rays to generate
N = 1000
# w = width of plates
w = 1
# h = vertical separation of plates
# L = horizontal offset of plates (L = w = h)
L = 1
h = 1
p_points = 100
# counters for number of rays and number of hits
rays = 0
hits = 0
while rays < N:
    rays = rays + 1
    # random origin of rays along w on surface 1
    Rx = random.uniform(0, 1)
    Rt = random.uniform(0, 1)
    Rph = random.uniform(0, 1)
    x1 = Rx * w
    # polar and azimuth angles - random ray directions
    theta = np.arcsin(np.sqrt(Rt))
    phi = 2*np.pi*Rph
    # theta = np.arcsin(Rt)
    xi = x1 + h*np.tan(theta)*np.cos(phi)
    if xi >= L and xi <= (L+w):
        hit = 1
    else:
        hit = 0
    hits = hits + hit
    gap = N / p_points
    r = rays % gap
    if r == 0:
        F = hits / rays
plt.figure(figsize=(8, 4))
plt.plot(N, F, linewidth=2)
plt.xlabel("N - Rays")
plt.ylabel("F_12")
plt.show()
f_12 = hits / N
print(f"F_12 = {f_12} at N = {N} iterations")
# Grab current time after running the code
end = time.time()
# Subtract start time from the end time
total_time = end - start
f_time = round(total_time)
print(f"Running time = {f_time} seconds")
I need to draw a contour plot of a function defined on a hexagonal area of points. I build this function using three separate meshgrids and then draw all the contour plots on one axis. It looks something like this:
steps = int(k0/kstep0) # just a parameter of how many points are taken in hexagon
energies1 = np.zeros((steps,steps))
energies2 = np.zeros((steps,steps))
energies3 = np.zeros((steps,steps))
gridi = np.arange(steps)
gridj = np.arange(steps)
iv,jv = np.meshgrid(gridi,gridj)
Kx1 = -0.5*np.sqrt(3)*k0
Ky1 = -0.5*k0 # Dirac point coordinates
kstepix1 = 0
kstepiy1 = kstep0
kstepjx1 = 0.5*np.sqrt(3)*kstep0
kstepjy1 = -0.5*kstep0
kxv1 = kstepix1*jv+kstepjx1*iv+Kx1
kyv1 = kstepiy1*jv+kstepjy1*iv+Ky1
Kx2 = 0
Ky2 = k0 # Dirac point coordinates
kstepix2 = 0.5*np.sqrt(3)*kstep0
kstepiy2 = -0.5*kstep0
kstepjx2 = -0.5*np.sqrt(3)*kstep0
kstepjy2 = -0.5*kstep0
kxv2 = kstepix2*iv+kstepjx2*jv+Kx2
kyv2 = kstepiy2*iv+kstepjy2*jv+Ky2
Kx3 = 0.5*np.sqrt(3)*k0
Ky3 = -0.5*k0 # Dirac point coordinates
kstepix3 = -0.5*np.sqrt(3)*kstep0
kstepiy3 = -0.5*kstep0
kstepjx3 = 0
kstepjy3 = kstep0
kxv3 = kstepix3*jv+kstepjx3*iv+Kx3
kyv3 = kstepiy3*jv+kstepjy3*iv+Ky3
for i in np.arange(steps):
    for j in np.arange(steps):
        kx = i*kstepix1 + j*kstepjx1 + Kx1
        ky = i*kstepiy1 + j*kstepjy1 + Ky1
        ham = TwistHamiltonian(kx,ky,angle,N,t_layers) # here I solve some matrix and extract its eigenvalues
        eigenvalues, eigenvectors = np.linalg.eigh(ham)
        energies1[i,j] = np.min(np.abs(eigenvalues))
for i in np.arange(steps):
    for j in np.arange(steps):
        kx = i*kstepix2 + j*kstepjx2 + Kx2
        ky = i*kstepiy2 + j*kstepjy2 + Ky2
        ham = TwistHamiltonian(kx,ky,angle,N,t_layers)
        eigenvalues, eigenvectors = np.linalg.eigh(ham)
        energies2[i,j] = np.min(np.abs(eigenvalues))
for i in np.arange(steps):
    for j in np.arange(steps):
        kx = i*kstepix3 + j*kstepjx3 + Kx3
        ky = i*kstepiy3 + j*kstepjy3 + Ky3
        ham = TwistHamiltonian(kx,ky,angle,N,t_layers)
        eigenvalues, eigenvectors = np.linalg.eigh(ham)
        energies3[i,j] = np.min(np.abs(eigenvalues))
from matplotlib import pyplot as plt
from matplotlib.cm import ScalarMappable
save_to = '../plots/ContourPlots/TwistEnergy'+'kstep0_'+str(kstep0)+'tlayers_'+str(t_layers)+'theta'+str(theta)[:5]+"_"+str(N)+'.png'
fig, ax = plt.subplots(figsize=(9,9))
cp1 = ax.contourf(kxv1,kyv1,energies1,cmap='RdGy')
cp2 = ax.contourf(kxv2,kyv2,energies2,cmap='RdGy')
cp3 = ax.contourf(kxv3,kyv3,energies3,cmap='RdGy')
The result is close to the desired output image. However, the coloring of the three pieces is slightly different, which spoils the whole picture. How can I fix this?
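A minimal sketch of one common fix, assuming the energies and k-grids from the code above: give all three contourf calls the same explicit levels (and therefore the same color mapping), and attach a single colorbar:
import numpy as np
import matplotlib.pyplot as plt

# energies1/2/3 and kxv1/kyv1 ... kxv3/kyv3 are assumed to exist as above
vmin = min(e.min() for e in (energies1, energies2, energies3))
vmax = max(e.max() for e in (energies1, energies2, energies3))
levels = np.linspace(vmin, vmax, 21)          # common contour levels

fig, ax = plt.subplots(figsize=(9, 9))
cp1 = ax.contourf(kxv1, kyv1, energies1, levels=levels, cmap='RdGy')
cp2 = ax.contourf(kxv2, kyv2, energies2, levels=levels, cmap='RdGy')
cp3 = ax.contourf(kxv3, kyv3, energies3, levels=levels, cmap='RdGy')
fig.colorbar(cp1, ax=ax)                      # one colorbar for all three patches
Without shared levels, each contourf call normalizes against its own data range, so identical energies can end up with different colors in different patches.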
I have a dataframe of essentially random numbers (except for one column), some of which are NaNs. MWE:
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
randomNumberGenerator = np.random.RandomState(1000)
z = 5 * randomNumberGenerator.rand(101)
A = 4 * z - 3+ randomNumberGenerator.randn(101)
B = 4 * z - 2+ randomNumberGenerator.randn(101)
C = 4 * z - 1+ randomNumberGenerator.randn(101)
D = 4 * z - 4+ randomNumberGenerator.randn(101)
A[50] = np.nan
A[:3] = np.nan
B[12:20] = np.nan
sources= pd.DataFrame({'z': z})
sources['A'] = A
sources['B'] = B
sources['C'] = C
sources['D'] = D
#sources= sources.dropna()
x = sources.z
y1 = sources.A
y2 = sources.B
y3 = sources.C
y4 = sources.D
for i in [y1, y2, y3, y4]:
    count = np.count_nonzero(~np.logical_or(np.isnan(x), np.isnan(i)))
    label = 'Points plotted: %d' % count
    plt.scatter(x, i, label=label)
plt.legend()
I need to bin the data according to x and plot different columns in each bin, in 3 side-by-side subplots:
z <= 1: plot A & B | 1 < z < 3: plot B & C | 3 <= z: plot C & D
I've tried to bin the data with
x1 = sources[sources['z']<1] # z < 1
x2 = sources[sources['z']<3]
x2 = x2[x2['z']>=1] # 1<= z < 3
x3 = sources[sources['z']<max(z)]
x3 = x3[x3['z']>=3] # 3 <= z <= max(z)
x1 = x1['z']
x2 = x2['z']
x3 = x3['z']
but there's got to be a better way to go about it. What's the best way to produce something like this?
Binning in pandas is done with cut, so the solution is:
sources= pd.DataFrame({'z': z})
sources['A'] = A
sources['B'] = B
sources['C'] = C
sources['D'] = D
#sources= sources.dropna()
bins = pd.cut(sources['z'], [-np.inf, 1, 3, max(z)], labels=[1,2,3])
m1 = bins == 1
m2 = bins == 2
m3 = bins == 3
# x-values: the z column within each bin
x11 = sources.loc[m1, 'z']
x12 = sources.loc[m1, 'z']
x21 = sources.loc[m2, 'z']
x22 = sources.loc[m2, 'z']
x31 = sources.loc[m3, 'z']
x32 = sources.loc[m3, 'z']
y11 = sources.loc[m1, 'A']
y12 = sources.loc[m1, 'B']
y21 = sources.loc[m2, 'B']
y22 = sources.loc[m2, 'C']
y31 = sources.loc[m3, 'C']
y32 = sources.loc[m3, 'D']
tups = [(x11, x12, y11, y12), (x21, x22,y21, y22),(x31, x32, y31, y32)]
fig, ax = plt.subplots(1,3)
ax = ax.flatten()
for k, (i1, i2, j1, j2) in enumerate(tups):
    count1 = np.count_nonzero(~np.logical_or(np.isnan(i1), np.isnan(j1)))
    count2 = np.count_nonzero(~np.logical_or(np.isnan(i2), np.isnan(j2)))
    label1 = 'Points plotted: %d' % count1
    label2 = 'Points plotted: %d' % count2
    ax[k].scatter(i1, j1, label=label1)
    ax[k].scatter(i2, j2, label=label2)
    ax[k].legend()
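An equivalent, more compact way to write the same thing (a sketch, assuming sources, the masks m1-m3 and the plt import from above) is to loop over (mask, column, column) triples instead of naming twelve intermediate variables:
pairs = [(m1, 'A', 'B'), (m2, 'B', 'C'), (m3, 'C', 'D')]
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for axis, (mask, col1, col2) in zip(axes, pairs):
    sub = sources[mask]
    for col in (col1, col2):
        count = sub[['z', col]].dropna().shape[0]   # rows where both z and col are non-NaN
        axis.scatter(sub['z'], sub[col], label='Points plotted: %d' % count)
    axis.legend()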
I have two dataframes (X & Y). I would like to link them together and to predict the probability that each potential match is correct.
import math
import numpy as np
import pandas as pd

X = pd.DataFrame({'A': ["One", "Two", "Three"]})
Y = pd.DataFrame({'A': ["One", "To", "Free"]})
Method A
I have not yet fully understood the theory but there is an approach presented in:
Sayers, A., Ben-Shlomo, Y., Blom, A.W. and Steele, F., 2015. Probabilistic record linkage. International journal of epidemiology, 45(3), pp.954-964.
Here is my attempt to implement it in pandas:
# Probability that Matches are True Matches
m = 0.95
# Probability that non-Matches are True non-Matches
u = min(len(X), len(Y)) / (len(X) * len(Y))
# Priors
M_Pr = u
U_Pr = 1 - M_Pr
O_Pr = M_Pr / U_Pr # Prior odds of a match
# Combine the dataframes
X['key'] = 1
Y['key'] = 1
Z = pd.merge(X, Y, on='key')
Z = Z.drop('key',axis=1)
X = X.drop('key',axis=1)
Y = Y.drop('key',axis=1)
# Levenshtein distance
def Levenshtein_distance(s1, s2):
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    distances = range(len(s1) + 1)
    for i2, c2 in enumerate(s2):
        distances_ = [i2 + 1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]
L_D = np.vectorize(Levenshtein_distance, otypes=[float])
Z["D"] = L_D(Z['A_x'], Z['A_y'])
# Max string length
def Max_string_length(X, Y):
    return max(len(X), len(Y))
M_L = np.vectorize(Max_string_length, otypes=[float])
Z["L"] = M_L(Z['A_x'], Z['A_y'])
# Agreement weight
def Agreement_weight(D, L):
    return 1 - (D / L)
A_W = np.vectorize(Agreement_weight, otypes=[float])
Z["C"] = A_W(Z['D'], Z['L'])
# Likelihood ratio
def Likelihood_ratio(C):
    return (m/u) - ((m/u) - ((1-m) / (1-u))) * (1-C)
L_R = np.vectorize(Likelihood_ratio, otypes=[float])
Z["G"] = L_R(Z['C'])
# Match weight
def Match_weight(G):
    return math.log(G) * math.log(2)
M_W = np.vectorize(Match_weight, otypes=[float])
Z["R"] = M_W(Z['G'])
# Posterior odds
def Posterior_odds(R):
    return math.exp(R / math.log(2)) * O_Pr
P_O = np.vectorize(Posterior_odds, otypes=[float])
Z["O"] = P_O(Z['R'])
# Probability
def Probability(O):
    return O / (1 + O)
Pro = np.vectorize(Probability, otypes=[float])
Z["P"] = Pro(Z['O'])
I have verified that this gives the same results as in the paper. A sensitivity check on m shows that its exact value does not make much of a difference.
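For a quick look at the output, the candidate pairs can be ranked by the resulting probability (the A_x/A_y column names come from the merge above):
print(Z.sort_values('P', ascending=False)[['A_x', 'A_y', 'C', 'P']].head())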
Method B
These assumptions won't apply to every application, but in some cases each row of X should match exactly one row of Y. In that case:
the probabilities for each row of X should sum to 1, and
if there are many credible candidates to match to, that should reduce the probability of picking the right one.
Then:
X["I"] = X.index
# Combine the dataframes
X['key'] = 1
Y['key'] = 1
Z = pd.merge(X, Y, on='key')
Z = Z.drop('key',axis=1)
X = X.drop('key',axis=1)
Y = Y.drop('key',axis=1)
# Levenshtein distance
def Levenshtein_distance(s1, s2):
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    distances = range(len(s1) + 1)
    for i2, c2 in enumerate(s2):
        distances_ = [i2 + 1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]
L_D = np.vectorize(Levenshtein_distance, otypes=[float])
Z["D"] = L_D(Z['A_x'], Z['A_y'])
# Max string length
def Max_string_length(X, Y):
    return max(len(X), len(Y))
M_L = np.vectorize(Max_string_length, otypes=[float])
Z["L"] = M_L(Z['A_x'], Z['A_y'])
# Agreement weight
def Agreement_weight(D, L):
    return 1 - (D / L)
A_W = np.vectorize(Agreement_weight, otypes=[float])
Z["C"] = A_W(Z['D'], Z['L'])
# Normalised Agreement Weight
T = Z.groupby('I').agg({'C': sum})
D = pd.DataFrame(T)
D.columns = ['T']
J = Z.set_index('I').join(D)
J['P1'] = J['C'] / J['T']
The results can then be compared against Method A.
Method C
This combines method A with method B:
# Normalised Probability
U = Z.groupby('I').agg({'P': sum})
E = pd.DataFrame(U)
E.columns = ['U']
K = Z.set_index('I').join(E)
K['P1'] = J['P1']
K['P2'] = K['P'] / K['U']
We can see that Method B (P1) doesn't take the match uncertainty into account, whereas Method C (P2) does.
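As a quick check (a sketch, assuming K from the code above): by construction, the normalised probabilities P2 should sum to 1 across the candidate matches for each row of X, and the two scores can be inspected side by side:
# P2 sums to 1 within each row of X (grouped on the 'I' index)
print(K.groupby(level='I')['P2'].sum())
# compare the two normalised scores for every candidate pair
print(K[['A_x', 'A_y', 'P1', 'P2']].sort_values('P2', ascending=False))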