# -*- coding: utf-8 -*-
"""Final_pcl_app_gnc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wlx6y3OvcIormL-nGnODVr_rag1s4n9B
"""
"""**RegistrationGNC Method for Point Cloud** """
import numpy as np
from matplotlib import pyplot as plt
import time
# Weight update (Geman-McClure-style GNC weights):
# w_j = ( mu * barc2 / (res_j + mu * barc2) )^2
def weight_update(mu, res, barc2, N):
    weight = np.ones(N)
    for j in range(N):
        weight[j] = ((mu * barc2) / (res[j] + mu * barc2)) ** 2
    return weight
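
# Hedged sanity check (added for illustration, not part of the original notebook):
# weights should be ~1 for residuals far below mu*barc2 and shrink towards 0 for
# residuals far above it.
_w_check = weight_update(mu=1.0, res=np.array([0.0, 1.0, 100.0]), barc2=1.0, N=3)
print("weight_update check:", _w_check)  # expected roughly [1.0, 0.25, ~1e-4]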
def pcl_problem(N, outlier_percentage):
    # Generate N random 3-D source points
    source = np.random.rand(3, N)
    # Apply an arbitrary scale, translation and rotation (ground truth)
    scale = 1.5
    translation = np.array([[1], [1], [-1]])
    rotation = np.array([[ 0.47639458, -0.72295936, -0.50037783],
                         [ 0.76249023,  0.0563402,   0.64454203],
                         [-0.43778631, -0.68858953,  0.57808962]])
    dst = scale * np.matmul(rotation, source) + translation
    # Corrupt a fraction of the target points to create gross outliers
    num_outliers = int(outlier_percentage / 100 * N)
    outlier_points = np.random.randint(0, N, num_outliers)
    dst[:, outlier_points] = 10  # replace these columns with a far-away constant point
    return dst, source, num_outliers
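
# Hedged sketch (added for illustration): with 0% outliers, pcl_problem returns a
# target cloud that is an exact similarity transform of the source.
_dst_chk, _src_chk, _n_out_chk = pcl_problem(100, 0)
print("pcl_problem check:", _dst_chk.shape, _src_chk.shape, "outliers:", _n_out_chk)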
def residuals(weights, dst, source, N):
    # Assumed noise bound of the dataset; residuals are normalised by it
    Noiselimit = 0.01
    w = np.sqrt(weights)
    # Weighted centroids of source and target clouds
    S_center = np.sum(source * weights.reshape(1, -1), axis=1) / np.sum(weights)
    D_center = np.sum(dst * weights.reshape(1, -1), axis=1) / np.sum(weights)
    # Weighted, centred point sets
    weighted_mean_S = w.reshape(1, -1) * (source - S_center.reshape(-1, 1))
    weighted_mean_D = w.reshape(1, -1) * (dst - D_center.reshape(-1, 1))
    # Singular value decomposition of the resulting covariance matrix
    U, s, Vh = np.linalg.svd(weighted_mean_D @ weighted_mean_S.T)
    R_ = U @ Vh  # note: assumes det(U @ Vh) = +1 (no reflection correction applied)
    S_ = np.sum(s) / np.sum(np.square(weighted_mean_S))
    t_ = D_center - S_ * R_ @ S_center
    # Squared per-point residuals of the current estimate, normalised by the noise bound
    residues = dst - S_ * np.matmul(R_, source) - np.expand_dims(t_, axis=1)
    residuals = np.sum(residues ** 2, axis=0) / Noiselimit
    return residuals, S_, R_, t_
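
# Hedged sanity check (added for illustration): on outlier-free data, the weighted
# closed-form step above should recover the transform used in pcl_problem
# (scale 1.5, translation [1, 1, -1]) when all weights are one.
_dst_r, _src_r, _ = pcl_problem(200, 0)
_res_r, _S_r, _R_r, _t_r = residuals(np.ones(200), _dst_r, _src_r, 200)
print("recovered scale:", _S_r)          # expected ~1.5
print("recovered translation:", _t_r)    # expected ~[ 1.  1. -1.]
print("max normalised residual:", np.max(_res_r))  # expected ~0 (numerical noise)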
def gnc(tgt, src, N):
    max_iteration = 1000
    barc2 = 1  # residuals are already divided by the noise limit
    weights = np.ones(N)
    res, Scl, Rot, Trans = residuals(weights, tgt, src, N)  # initial unweighted estimate
    r = np.max(res)
    mu = 2 * r / barc2  # start from a strongly convexified surrogate
    i = 1
    while i < max_iteration and mu > 1:
        weights = weight_update(mu, res, barc2, N)
        res, Scl, Rot, Trans = residuals(weights, tgt, src, N)
        mu = mu / 1.4  # gradually decrease mu towards the original robust cost
        i = i + 1
    return Scl, Rot, Trans, i
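
# Hedged usage sketch (added for illustration): run GNC on a cloud with 20% outliers
# and compare against the values hard-coded in pcl_problem. At this outlier level the
# estimate is expected to stay close to scale 1.5 and translation [1, 1, -1].
_tgt_g, _src_g, _ = pcl_problem(1000, 20)
_S_g, _R_g, _t_g, _it_g = gnc(_tgt_g, _src_g, 1000)
print("GNC scale:", _S_g, " translation:", _t_g, " iterations:", _it_g)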
# Ground-truth values of the transformation (must match pcl_problem)
scale = 1.5
translation = np.array([[1], [1], [-1]])
rotation = np.array([[ 0.47639458, -0.72295936, -0.50037783],
                     [ 0.76249023,  0.0563402,   0.64454203],
                     [-0.43778631, -0.68858953,  0.57808962]])

# Monte Carlo runs of GNC over a range of outlier percentages
MC_run = 20
outlier_percentage_arr = np.linspace(4, 85, 10)
Rot_err_gnc = np.zeros((MC_run, 10))
Trans_err_gnc = np.zeros((MC_run, 10))
Scale_err_gnc = np.zeros((MC_run, 10))
Iterations = np.zeros((MC_run, 10))
for j in range(MC_run):
    for k in range(10):
        tgt, src, num_outliers = pcl_problem(1000, outlier_percentage_arr[k])
        start_time = time.time()
        scale_tgt, Rotn, Trans_tgt, itr = gnc(tgt, src, 1000)
        end_time = time.time()
        elapsed_time = end_time - start_time
        # Geodesic rotation error in degrees
        Rot_err_gnc[j, k] = np.arccos(np.clip((np.trace(Rotn @ rotation.T) - 1) / 2, -1, 1)) * 180 / np.pi
        # Euclidean translation error
        Trans_err_gnc[j, k] = np.sqrt(np.sum((Trans_tgt - translation.flatten()) ** 2))
        Scale_err_gnc[j, k] = abs(scale_tgt - scale)
        Iterations[j, k] = itr
        print("Elapsed time_GNC:", elapsed_time, "seconds")
"""**Using Ransac Method**"""
def RANSAC(dst, source, num_outliers):
    threshold_noise = 0.01
    # Maximum number of iterations and minimal sample size
    num_iterations = 1000
    sample_size = 5
    best_transformation = None
    best_num_inliers = 0
    # Early-exit target: stop once all true inliers are in the consensus set
    Min_num_inliers = source.shape[1] - num_outliers
    # Perform RANSAC
    i = 0
    while i < num_iterations and best_num_inliers < Min_num_inliers:
        # Sample a minimal set of correspondences from the two point clouds
        sample_indices = np.random.choice(source.shape[1], size=sample_size, replace=False)
        source_sample = source[:, sample_indices]
        target_sample = dst[:, sample_indices]
        s_center = np.mean(source_sample, axis=1)
        # Estimate the similarity transform between the two samples
        src_centered = source_sample - np.expand_dims(np.mean(source_sample, axis=1), axis=1)
        tgt_centered = target_sample - np.expand_dims(np.mean(target_sample, axis=1), axis=1)
        C = tgt_centered @ src_centered.T  # covariance
        U, s, Vt = np.linalg.svd(C)
        R = U @ Vt
        S = np.sum(s) / np.sum(np.square(src_centered))
        t = np.mean(target_sample, axis=1) - S * R @ s_center
        # Score the candidate on the full cloud
        residues = dst - S * np.matmul(R, source) - np.expand_dims(t, axis=1)
        residuals = np.sum(residues ** 2, axis=0)
        num_inliers = np.sum(residuals < threshold_noise)
        # Keep the candidate with the largest consensus set
        if num_inliers > best_num_inliers:
            best_transformation = (R, t, S)
            best_num_inliers = num_inliers
        i = i + 1
    R, t, S = best_transformation
    return R, t, S, i
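
# Hedged usage sketch (added for illustration): RANSAC on the same kind of problem;
# the true outlier count is passed in so the early-exit inlier target can be set.
_tgt_r2, _src_r2, _n_out_r2 = pcl_problem(1000, 20)
_R_r2, _t_r2, _S_r2, _it_r2 = RANSAC(_tgt_r2, _src_r2, _n_out_r2)
print("RANSAC scale:", _S_r2, " translation:", _t_r2, " iterations:", _it_r2)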
# Monte Carlo runs of RANSAC over the same outlier percentages
MC_run = 20
Rot_err_Ran = np.zeros((MC_run, 10))
Trans_err_Ran = np.zeros((MC_run, 10))
Scale_err_Ran = np.zeros((MC_run, 10))
Iterations_Ran = np.zeros((MC_run, 10))
for j in range(MC_run):
    for k in range(10):
        tgt, src, outliers = pcl_problem(1000, outlier_percentage_arr[k])
        start_time = time.time()
        R, t, S, i = RANSAC(tgt, src, outliers)
        stop_time = time.time()
        elapsed_time = stop_time - start_time
        #print("Iteration:", i)
        #print("Translation:", t)
        #print("Rotation matrix:", R)
        #print("Scale:", S, np.linalg.norm(R, axis=0))
        Rot_err_Ran[j, k] = np.arccos(np.clip((np.trace(R @ rotation.T) - 1) / 2, -1, 1)) * 180 / np.pi
        Trans_err_Ran[j, k] = np.sqrt(np.sum((t - translation.flatten()) ** 2))
        Scale_err_Ran[j, k] = abs(S - scale)
        Iterations_Ran[j, k] = i
        print("Elapsed time_RANSAC:", elapsed_time, "seconds")
""" **Different error Plots Vs Outlier Percentage**"""
fig, ax = plt.subplots()
ax.set_yscale('log')
for m in range(10):
    if m == 0:
        ax.plot(outlier_percentage_arr, Trans_err_gnc[m, :], '+', color='b', label='GNC')
        ax.plot(outlier_percentage_arr, Trans_err_Ran[m, :], '+', color='r', label='RANSAC')
        plt.legend()
    else:
        ax.plot(outlier_percentage_arr, Trans_err_gnc[m, :], '+', color='b')
        ax.plot(outlier_percentage_arr, Trans_err_Ran[m, :], '+', color='r')
plt.xlabel('Outlier percentage')
plt.ylabel('Translation error')
plt.show()
fig,axis=plt.subplots(figsize=(10,6))
axis.set_yscale('log')
bp1 = axis.boxplot(Trans_err_gnc, positions=np.array(range(len(outlier_percentage_arr)))*2.0-0.4,widths=0.4,sym='', boxprops=dict(color='blue'))
bp2 = axis.boxplot(Trans_err_Ran, positions=np.array(range(len(outlier_percentage_arr)))*2.0+0.4,widths=0.4,sym='',boxprops=dict(color='red'))
plt.xticks(range(0, len(outlier_percentage_arr)*2, 2), outlier_percentage_arr,rotation=45)
plt.xlabel('Outlier percentage')
plt.ylabel('Translation error')
plt.legend([bp1['boxes'][0], bp2['boxes'][0]], ['GNC', 'RANSAC'])
fig,axis=plt.subplots(figsize=(10,6))
#axis.set_yscale('log')
bp1 = axis.boxplot(Rot_err_gnc, positions=np.array(range(len(outlier_percentage_arr)))*2.0-0.4,widths=0.4,sym='', boxprops=dict(color='blue'))
bp2 = axis.boxplot(Rot_err_Ran, positions=np.array(range(len(outlier_percentage_arr)))*2.0+0.4,widths=0.4,sym='',boxprops=dict(color='red'))
# Customize the plot
plt.xticks(range(0, len(outlier_percentage_arr)*2, 2), outlier_percentage_arr,rotation=45)
plt.xlabel('Outlier percentage')
plt.ylabel('Rotation error (deg)')
plt.legend([bp1['boxes'][0], bp2['boxes'][0]], ['GNC', 'RANSAC'])
fig,axis=plt.subplots(figsize=(10,6))
axis.set_yscale('log')
bp1 = axis.boxplot(Scale_err_gnc, positions=np.array(range(len(outlier_percentage_arr)))*2.0-0.4,widths=0.4,sym='', boxprops=dict(color='blue'))
bp2 = axis.boxplot(Scale_err_Ran, positions=np.array(range(len(outlier_percentage_arr)))*2.0+0.4,widths=0.4,sym='',boxprops=dict(color='red'))
# Customize the plot
plt.xticks(range(0, len(outlier_percentage_arr)*2, 2), outlier_percentage_arr,rotation=45)
plt.xlabel('Outlier percentage')
plt.ylabel('Scale error')
plt.legend([bp1['boxes'][0], bp2['boxes'][0]], ['GNC', 'RANSAC'])
fig,axis=plt.subplots(figsize=(10,6))
axis.set_yscale('log')
bp1 = axis.boxplot(Iterations, positions=np.array(range(len(outlier_percentage_arr)))*2.0-0.4,widths=0.4,sym='', boxprops=dict(color='blue'))
bp2 = axis.boxplot(Iterations_Ran, positions=np.array(range(len(outlier_percentage_arr)))*2.0+0.4,widths=0.4,sym='',boxprops=dict(color='red'))
# Customize the plot
plt.xticks(range(0, len(outlier_percentage_arr)*2, 2), outlier_percentage_arr,rotation=45)
plt.xlabel('Outlier percentage')
plt.ylabel('Iterations')
plt.legend([bp1['boxes'][0], bp2['boxes'][0]], ['GNC', 'RANSAC'])
print(Iterations_Ran)
print(outlier_percentage_arr)
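
# Hedged post-processing sketch (added for illustration): mean errors across the
# Monte Carlo runs, per outlier percentage, as a numerical companion to the plots.
print("Mean rotation error (deg), GNC:   ", np.round(np.mean(Rot_err_gnc, axis=0), 3))
print("Mean rotation error (deg), RANSAC:", np.round(np.mean(Rot_err_Ran, axis=0), 3))
print("Mean scale error, GNC:   ", np.round(np.mean(Scale_err_gnc, axis=0), 4))
print("Mean scale error, RANSAC:", np.round(np.mean(Scale_err_Ran, axis=0), 4))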