Merge branch 'noetic-devel' of github.com:lardemua/atom into bruno
brunofavs committed May 10, 2024
2 parents 8f77f42 + 22ceee5 commit 456931a
Showing 3 changed files with 99 additions and 38 deletions.
4 changes: 2 additions & 2 deletions atom_evaluation/scripts/other_calibrations/cv_eye_in_hand.py
100644 → 100755
@@ -331,14 +331,14 @@ def main():
# --------------------------------------------------
h_T_c_ground_truth = getTransform(from_frame=args['hand_link'],
to_frame=dataset['calibration_config']['sensors'][args['camera']]['link'],
transforms=dataset['collections'][selected_collection_key]['transforms'])
transforms=dataset_ground_truth['collections'][selected_collection_key]['transforms'])
print(Fore.GREEN + 'Ground Truth h_T_c=\n' + str(h_T_c_ground_truth))

print('estimated h_T_c=\n' + str(h_T_c))

translation_error, rotation_error, _, _, _, _, _, _ = compareTransforms(
h_T_c, h_T_c_ground_truth)
print('Etrans = ' + str(round(translation_error*1000, 3)) + ' (m)')
print('Etrans = ' + str(round(translation_error*1000, 3)) + ' (mm)')
print('Erot = ' + str(round(rotation_error*180/math.pi, 3)) + ' (deg)')

# --------------------------------------------------
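
For context on the unit fix above: Etrans is a Euclidean distance reported in millimetres and Erot a rotation angle reported in degrees. A minimal numpy sketch of those two quantities, assuming 4x4 homogeneous matrices (this is not the ATOM compareTransforms implementation, just an illustration of what is being printed):

import math
import numpy as np

def transform_errors(T_est, T_gt):
    # residual transform between the estimate and the ground truth
    delta = np.linalg.inv(T_gt) @ T_est
    translation_error = float(np.linalg.norm(delta[0:3, 3]))  # metres
    # rotation angle of the residual rotation, clamped for numerical safety
    cos_angle = (np.trace(delta[0:3, 0:3]) - 1.0) / 2.0
    rotation_error = math.acos(max(-1.0, min(1.0, cos_angle)))  # radians
    return translation_error, rotation_error

# Reported as in the corrected print statements:
# Etrans (mm): round(translation_error * 1000, 3)
# Erot (deg):  round(rotation_error * 180 / math.pi, 3)
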
63 changes: 33 additions & 30 deletions atom_evaluation/scripts/other_calibrations/cv_eye_to_hand.py
Expand Up @@ -23,7 +23,7 @@
from atom_calibration.calibration.visualization import getCvImageFromCollectionSensor
from atom_core.atom import getChain, getTransform
from atom_core.dataset_io import filterCollectionsFromDataset, loadResultsJSON, saveAtomDataset
from atom_core.geometry import matrixToTranslationRotation, translationRotationToTransform, traslationRodriguesToTransform
from atom_core.geometry import matrixToTranslationRotation, translationRotationToTransform, traslationRodriguesToTransform, translationQuaternionToTransform
from atom_core.naming import generateKey
from atom_core.transformations import compareTransforms
from atom_core.utilities import atomError, compareAtomTransforms
@@ -196,16 +196,16 @@ def main():
# ---------------------------------------
# Hand eye calibration (in an eye-in-hand configuration) has 4 different transformations.
#
# robot-base to hand b_T_h (obtained from direct kinematics)
# hand to robot-base h_T_b (obtained from direct kinematics)
# camera to pattern c_T_p (obtained through the detection of the known pattern)
# hand to camera h_T_c (estimated by the calibration)
# robot-base to pattern b_T_p (estimated by the calibration)
# We will use this notation throughout the code.

c_T_p_lst = [] # list of camera to pattern 4x4 transforms.
b_T_h_lst = [] # list of base to hand 4x4 transforms.
h_T_b_lst = [] # list of hand to base 4x4 transforms.

# Iterate all collections and create the lists of transforms c_T_p and b_T_h
# Iterate all collections and create the lists of transforms c_T_p and h_T_b
# NOTE: cannot test with a single collection. We need at least three. Check
# https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga41b1a8dd70eae371eba707d101729c36
for collection_key, collection in dataset['collections'].items():
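
To make the renamed frames above concrete: the notation a_T_b is used for the transform relating frame b to frame a (assuming here the usual convention that it maps points expressed in frame b into frame a), so h_T_b is simply the inverse of the old b_T_h. A minimal sketch of that inversion with a hypothetical helper, not an ATOM function:

import numpy as np

def invert_transform(a_T_b):
    # return b_T_a given a 4x4 homogeneous a_T_b
    R = a_T_b[0:3, 0:3]
    t = a_T_b[0:3, 3]
    b_T_a = np.eye(4)
    b_T_a[0:3, 0:3] = R.T
    b_T_a[0:3, 3] = -R.T @ t
    return b_T_a

# e.g. the hand-to-base transforms collected below could equally be obtained as
# h_T_b = invert_transform(b_T_h)
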
@@ -265,52 +265,52 @@ def main():
cv2.destroyWindow(window_name)

# ---------------------------------------
# Get b_T_h using direct kinematics
# Get h_T_b using direct kinematics
# ---------------------------------------
# We will use the atom builtin functions to get the transform directly
b_T_h = getTransform(from_frame=args['base_link'],
to_frame=args['hand_link'],
h_T_b = getTransform(from_frame=args['hand_link'],
to_frame=args['base_link'],
transforms=collection['transforms'])
b_T_h_lst.append(b_T_h)
h_T_b_lst.append(h_T_b)

# ---------------------------------------
# Run hand eye calibration
# ---------------------------------------
# We will use opencv's function calibrateHandEye()
# https://docs.opencv.org/4.5.4/d9/d0c/group__calib3d.html#gaebfc1c9f7434196a374c382abf43439b
# It requires a list of c_T_p and b_T_h transformations.
# It requires a list of c_T_p and h_T_b transformations.
# Transformations are represented by separate rotation (3x3) and translation (3x1) components, so we will have two lists per transformation.

# NOTE instead of lists we could also use np arrays like this, same result.
# n = len(c_T_p_lst)
# c_T_p_Rs = np.zeros((n, 3, 3))
# c_T_p_ts = np.zeros((n, 3, 1))
# b_T_h_Rs = np.zeros((n, 3, 3))
# b_T_h_ts = np.zeros((n, 3, 1))
# h_T_b_Rs = np.zeros((n, 3, 3))
# h_T_b_ts = np.zeros((n, 3, 1))

c_T_p_Rs = [] # list of rotations for c_T_p.
c_T_p_ts = [] # list of translations for c_T_p
b_T_h_Rs = [] # list of rotations for b_T_h
b_T_h_ts = [] # list of translations for b_T_h
h_T_b_Rs = [] # list of rotations for h_T_b
h_T_b_ts = [] # list of translations for h_T_b

for c_T_p, b_T_h in zip(c_T_p_lst, b_T_h_lst):
for c_T_p, h_T_b in zip(c_T_p_lst, h_T_b_lst):
# --- c_T_p camera to pattern
c_T_p_t, c_T_p_R = matrixToTranslationRotation(c_T_p)
c_T_p_Rs.append(c_T_p_R)
c_T_p_ts.append(c_T_p_t)

# --- b_T_h base to hand
b_T_h_t, b_T_h_R = matrixToTranslationRotation(b_T_h)
b_T_h_Rs.append(b_T_h_R)
b_T_h_ts.append(b_T_h_t)
# --- h_T_b hand to base
h_T_b_t, h_T_b_R = matrixToTranslationRotation(h_T_b)
h_T_b_Rs.append(h_T_b_R)
h_T_b_ts.append(h_T_b_t)

# ---------------------------------------
# Run hand eye calibration using calibrateHandEye
# ---------------------------------------

# In an eye-to-hand configuration, it returns b_T_c instead of h_T_c
b_T_c_R, b_T_c_t = cv2.calibrateHandEye(b_T_h_Rs,
b_T_h_ts,
b_T_c_R, b_T_c_t = cv2.calibrateHandEye(h_T_b_Rs,
h_T_b_ts,
c_T_p_Rs,
c_T_p_ts,
method=method)
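
For reference, a condensed sketch of the call pattern used above. Only cv2.calibrateHandEye is an actual OpenCV function; the variable names come from the lists built earlier and the TSAI method constant is just an example value for the method argument:

import cv2
import numpy as np

# Feeding hand-to-base (h_T_b) motions instead of base-to-hand ones is what
# turns the routine into an eye-to-hand calibration: the returned rotation and
# translation then describe b_T_c, the camera pose in the base frame.
b_T_c_R, b_T_c_t = cv2.calibrateHandEye(h_T_b_Rs, h_T_b_ts,
                                        c_T_p_Rs, c_T_p_ts,
                                        method=cv2.CALIB_HAND_EYE_TSAI)

# Assemble the 4x4 homogeneous matrix from the separate components.
b_T_c = np.eye(4)
b_T_c[0:3, 0:3] = b_T_c_R
b_T_c[0:3, 3] = b_T_c_t.flatten()
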
@@ -321,24 +321,27 @@ def main():

# Extract the transformation marked for calibration which is the
# cp_T_cc, where cp (calibration parent) and cc (calibration child).
# So we need to get cp_T_cc from the estimated h_T_c.
# So we need to get cp_T_cc from the estimated b_T_c.
# We use the following:
# b_T_c = b_T_cp * cp_T_cc * cc_T_c
# <=> cp_T_cc = (b_T_cp)-1 * b_T_c * (cc_T_c)-1
# <=> cp_T_cc = (b_T_cp)-1 * b_T_c * (cc_T_c)-1
# <=> cp_T_cc = cp_T_b * b_T_c * c_T_cc

calibration_parent = dataset['calibration_config']['sensors'][args['camera']]['parent_link']
calibration_child = dataset['calibration_config']['sensors'][args['camera']]['child_link']

b_T_cp = getTransform(from_frame=args['base_link'],
to_frame=calibration_parent,
cp_T_b = getTransform(from_frame=calibration_parent,
to_frame=args['base_link'],
transforms=dataset['collections'][selected_collection_key]['transforms'])


cc_T_c = getTransform(from_frame=calibration_child,
to_frame=dataset['calibration_config']['sensors'][args['camera']]['link'],
transforms=dataset['collections'][selected_collection_key]['transforms'])

cp_T_cc = inv(b_T_cp) @ b_T_c @ inv(cc_T_c)
c_T_cc = getTransform(from_frame=dataset['calibration_config']['sensors'][args['camera']]['link'],
to_frame=calibration_child,
transforms=dataset['collections'][selected_collection_key]['transforms'])

cp_T_cc = cp_T_b @ b_T_c @ c_T_cc

# Save to dataset
# Since the transformation cp_T_cc is static we will save the same transform to all collections
frame_key = generateKey(calibration_parent, calibration_child)
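
The rewritten chain a few lines above (cp_T_cc = cp_T_b @ b_T_c @ c_T_cc) is just the previous expression with the inverses pre-applied, since cp_T_b = inv(b_T_cp) and c_T_cc = inv(cc_T_c). A small self-contained check with random transforms (the helper and names are hypothetical):

import numpy as np
from numpy.linalg import inv

def random_transform(rng):
    # random rotation via QR decomposition, plus a random translation
    Q, _ = np.linalg.qr(rng.normal(size=(3, 3)))
    if np.linalg.det(Q) < 0:
        Q[:, 0] *= -1
    T = np.eye(4)
    T[0:3, 0:3] = Q
    T[0:3, 3] = rng.uniform(-1.0, 1.0, 3)
    return T

rng = np.random.default_rng(0)
b_T_cp, b_T_c, cc_T_c = (random_transform(rng) for _ in range(3))
cp_T_b, c_T_cc = inv(b_T_cp), inv(cc_T_c)

# Both forms of cp_T_cc agree.
assert np.allclose(inv(b_T_cp) @ b_T_c @ inv(cc_T_c),
                   cp_T_b @ b_T_c @ c_T_cc)
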
@@ -356,14 +356,14 @@ def main():
# --------------------------------------------------
b_T_c_ground_truth = getTransform(from_frame=args['base_link'],
to_frame=dataset['calibration_config']['sensors'][args['camera']]['link'],
transforms=dataset['collections'][selected_collection_key]['transforms'])
transforms=dataset_ground_truth['collections'][selected_collection_key]['transforms'])
print(Fore.GREEN + 'Ground Truth b_T_c=\n' + str(b_T_c_ground_truth))

print('estimated b_T_c=\n' + str(b_T_c))

translation_error, rotation_error, _, _, _, _, _, _ = compareTransforms(
b_T_c, b_T_c_ground_truth)
print('Etrans = ' + str(round(translation_error*1000, 3)) + ' (m)')
print('Etrans = ' + str(round(translation_error*1000, 3)) + ' (mm)')
print('Erot = ' + str(round(rotation_error*180/math.pi, 3)) + ' (deg)')

# --------------------------------------------------
70 changes: 64 additions & 6 deletions atom_evaluation/scripts/other_calibrations/rwhe_calib_ali.py
@@ -48,6 +48,62 @@ def getCameraIntrinsicsFromDataset(dataset, camera):

return K, D, image_size

def li_calib(AA,BB):
# From here on out, the code is a direct translation of the MATLAB code

n = len(AA) # n is the number of collections

# Transform the 4x4xn AA 3-dimensional matrix to a 4x(4xn) 2-dimensional matrix like in the original script
AA_2d = np.zeros((4, 4*n))
for i in range(n):
AA_2d[:, 4*i:4*i+4] = AA[i]
# Do the same for BB
BB_2d = np.zeros((4, 4*n))
for i in range(n):
BB_2d[:, 4*i:4*i+4] = BB[i]


A = np.zeros((12*n, 24))
b = np.zeros((12*n, 1))
for i in range(1,n+1):
Ra = AA_2d[0:3,4*i-4:4*i-1]
Rb = BB_2d[0:3,4*i-4:4*i-1]
ta = AA_2d[0:3,4*i-1:4*i]
tb = BB_2d[0:3,4*i-1:4*i]

A[12*i-12:12*i-3,0:9] = np.kron(Ra, np.eye(3))
A[12*i-12:12*i-3,9:18] = np.kron(-1*np.eye(3), Rb.T)
A[12*i-3:12*i,9:18] = np.kron(np.eye(3), tb.T)
A[12*i-3:12*i,18:21] = -1*Ra
A[12*i-3:12*i,21:24] = np.eye(3)

b[12*i-3:12*i] = ta

# The equivalent of the \ operator in MATLAB is the numpy linalg.lstsq function
x = np.linalg.lstsq(A,b, rcond=None)
x = x[0] # x[0] is all we need, as it is the array returned by matlab's "\""

# Get X
X = x[0:9].reshape((3,3)).T
[u,s,v] = np.linalg.svd(X)
X = u @ v.T
if (np.linalg.det(X) < 0):
X = u @ np.diag([1,1,-1]) @ v.T
X = np.append(X, x[21:24], axis=1)
X = np.append(X, np.array([[0,0,0,1]]), axis=0)

# Get Y
Y = x[9:18].reshape((3,3)).T
[u,s,v] = np.linalg.svd(Y)
Y = u @ v.T
if (np.linalg.det(Y) < 0):
Y = u @ np.diag([1,1,-1]) @ v.T
Y = np.append(Y, x[21:24], axis=1)
Y = np.append(Y, np.array([[0,0,0,1]]), axis=0)

return X,Y
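
li_calib stacks one Kronecker-product block per collection, solves the resulting linear system in a least-squares sense, and projects the two 3x3 rotation blocks back onto rotations via SVD. In terms of usage it behaves as a robot-world/hand-eye solver of the AX = YB form: given two same-length lists of 4x4 transforms it returns the two constant transforms X and Y. A hypothetical smoke test with placeholder identity inputs (the real inputs are the A and B transforms accumulated in main() below):

import numpy as np

AA = [np.eye(4) for _ in range(3)]  # placeholder robot-side transforms
BB = [np.eye(4) for _ in range(3)]  # placeholder camera-side transforms

X, Y = li_calib(AA, BB)
print('X =\n', X)  # expected to be (near-)identity for identity inputs
print('Y =\n', Y)
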


def main():

########################################
Expand All @@ -63,17 +119,17 @@ def main():
"returns True or False to indicate if the collection should be loaded (and used in the "
"optimization). The Syntax is lambda name: f(x), where f(x) is the function in python "
"language. Example: lambda name: int(name) > 5 , to load only collections 6, 7, and onward.")
ap.add_argument("-bln", "--base_link_name", type=str, required=False, default="base_link", help="Name of the robot base link frame.")
ap.add_argument("-eeln", "--end_effector_link_name", type=str, required=False, default="flange", help="Name of the end-effector link frame.")
ap.add_argument("-bl", "--base_link", type=str, required=False, default="base_link", help="Name of the robot base link frame.")
ap.add_argument("-hl", "--hand_link", type=str, required=False, default="flange", help="Name of the hand link frame.")
ap.add_argument("-c", "--camera", help="Camera sensor name.", type=str, required=True)
ap.add_argument("-p", "--pattern", help="Pattern to be used for calibration.", type=str, required=True)

args = vars(ap.parse_args())

json_file = args['json_file']
collection_selection_function = args['collection_selection_function']
base_link_name = args['base_link_name']
end_effector_link_name = args['end_effector_link_name']
base_link = args['base_link']
hand_link = args['hand_link']
camera = args['camera']
pattern = args['pattern']

@@ -145,8 +201,8 @@ def main():
# A is the transform from the gripper to the robot's base. We can get it from the dataset
transform_pool = collection['transforms']
A = getTransform(
from_frame=end_effector_link_name,
to_frame=base_link_name,
from_frame=hand_link,
to_frame=base_link,
transforms=transform_pool
)

@@ -175,6 +231,8 @@ def main():

BB.append(B)

X,Y = li_calib(AA,BB)
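
Mirroring the evaluation pattern of the two cv_eye_* scripts above, the returned transforms could then be checked against a ground-truth dataset; a sketch with placeholder names (estimated_T stands for whichever of X / Y corresponds to the transform being calibrated, ground_truth_T for the matching transform taken from a ground-truth dataset):

import math
from atom_core.transformations import compareTransforms

translation_error, rotation_error, _, _, _, _, _, _ = compareTransforms(
    estimated_T, ground_truth_T)
print('Etrans = ' + str(round(translation_error * 1000, 3)) + ' (mm)')
print('Erot = ' + str(round(rotation_error * 180 / math.pi, 3)) + ' (deg)')
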


if __name__ == '__main__':
main()
