
#912 cleanup.
miguelriemoliveira committed May 5, 2024
1 parent 4976d11 commit 7fa6b43
Showing 1 changed file with 81 additions and 67 deletions.
148 changes: 81 additions & 67 deletions atom_evaluation/scripts/other_calibrations/cv_eye_in_hand.py
@@ -1,41 +1,34 @@
#!/usr/bin/env python3

"""
Hand-eye calibration from OpenCV. Eye-in-hand variant.
"""

# -------------------------------------------------------------------------------
# --- IMPORTS
# -------------------------------------------------------------------------------

import argparse
import math
import os
from copy import deepcopy

import cv2
import numpy as np
import tf
from colorama import Fore, Style
from colorama import init as colorama_init
from matplotlib import cm
from numpy.linalg import inv
from prettytable import PrettyTable

from atom_calibration.calibration.visualization import getCvImageFromCollectionSensor
from atom_core.atom import getChain, getTransform
from atom_core.dataset_io import filterCollectionsFromDataset, loadResultsJSON, saveAtomDataset
from atom_core.drawing import drawCross2D
from atom_core.geometry import matrixToTranslationRotation, translationRotationToTransform, traslationRodriguesToTransform
from atom_core.naming import generateKey
from atom_core.transformations import compareTransforms
from atom_core.utilities import atomError, compareAtomTransforms, getNumberQualifier, printComparisonToGroundTruth
from atom_core.vision import projectToCamera

colorama_init(autoreset=True)

np.set_printoptions(precision=3, suppress=True)

# -------------------------------------------------------------------------------
@@ -59,8 +52,8 @@ def main():
help="Name of coordinate frame of the hand.", type=str, required=True)
ap.add_argument("-bl", "--base_link",
help="Name of coordinate frame of the robot's base.", type=str, required=True)
ap.add_argument("-ctgt", "--compare_to_ground_truth",
help="If the system being calibrated is simulated, directly compare the TFs to the ground truth.", action="store_true")
ap.add_argument("-ctgt", "--compare_to_ground_truth", action="store_true",
help="If the system being calibrated is simulated, directly compare the TFs to the ground truth.")
ap.add_argument("-csf", "--collection_selection_function", default=None, type=str,
help="A string to be evaluated into a lambda function that receives a collection name as input and "
"returns True or False to indicate if the collection should be loaded (and used in the "
@@ -74,21 +67,27 @@ def main():
help="Hand eye method. One of ['tsai', 'park', 'horaud', 'andreff', 'daniilidis'].", type=str)

args = vars(ap.parse_args())

# ---------------------------------------
# Dataset loading and preprocessing
# ---------------------------------------
dataset, _ = loadResultsJSON(args["json_file"],
args["collection_selection_function"])

# opencv can only process complete detections
args['remove_partial_detections'] = True
dataset = filterCollectionsFromDataset(dataset, args)

dataset_ground_truth = deepcopy(dataset) # make a copy before adding noise
dataset_initial = deepcopy(dataset) # store initial values

# ---------------------------------------
# --- Define selected collection key.
# ---------------------------------------
# We only need to get one collection because optimized transformations are static, which means they are the same for all collections. Let's select the first key in the dictionary and always get that transformation.
selected_collection_key = list(dataset["collections"].keys())[0]
print("Selected collection key is " + str(selected_collection_key))

# ---------------------------------------
# Verifications
# ---------------------------------------
@@ -112,15 +111,39 @@ def main():
elif args['method_name'] == 'daniilidis':
method = cv2.CALIB_HAND_EYE_DANIILIDIS

# Check the given hand link is in the chain from base to camera
chain = getChain(from_frame=args['base_link'],
to_frame=dataset['calibration_config']['sensors'][args['camera']]['link'],
transform_pool=dataset['collections'][selected_collection_key]['transforms'])

# chain is a list of dictionaries like this:
# [{'parent': 'forearm_link', 'child': 'wrist_1_link', 'key': 'forearm_link-wrist_1_link'},
# {'parent': 'wrist_1_link', 'child': 'wrist_2_link', 'key': 'wrist_1_link-wrist_2_link'}, ... ]

hand_frame_in_chain = False
for transform in chain:

if args['hand_link'] == transform['parent'] or args['hand_link'] == transform['child']:
hand_frame_in_chain = True

if not hand_frame_in_chain:
atomError('Selected hand link ' + Fore.BLUE + args['hand_link'] + Style.RESET_ALL +
' does not belong to the chain from base ' + Fore.BLUE + args['base_link'] +
Style.RESET_ALL + ' to the camera ' +
dataset['calibration_config']['sensors'][args['camera']]['link'])

# Check the hand to camera chain is composed only of fixed transforms
chain = getChain(from_frame=args['hand_link'],
to_frame=dataset['calibration_config']['sensors'][args['camera']]['link'],
transform_pool=dataset['collections'][selected_collection_key]['transforms'])

for transform in chain:
if not dataset['transforms'][transform['key']]['type'] == 'fixed':
atomError('Chain from hand link ' + Fore.BLUE + args['hand_link'] + Style.RESET_ALL +
' to camera link ' + Fore.BLUE +
dataset['calibration_config']['sensors'][args['camera']]['link'] +
Style.RESET_ALL + ' contains non fixed transform ' + Fore.RED +
transform['key'] + Style.RESET_ALL + '. Cannot calibrate.')

# ---------------------------------------
# Pattern configuration
@@ -146,29 +169,26 @@ def main():
K[2, :] = dataset['sensors'][args['camera']]['camera_info']['K'][6:9]
D[:, 0] = dataset['sensors'][args['camera']]['camera_info']['D'][0:5]

# ---------------------------------------
# --- Create lists of transformations for hand eye calibration
# ---------------------------------------
# Hand-eye calibration (in an eye-in-hand configuration) involves 4 different transformations:
#
# robot-base to hand b_T_h (obtained from direct kinematics)
# camera to pattern c_T_p (obtained through the detection of the known pattern)
# hand to camera h_T_c (estimated by the calibration)
# robot-base to pattern b_T_p (estimated by the calibration)
# We will use this notation throughout the code.
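#
# For reference (an editorial note; this is the standard eye-in-hand derivation,
# not an ATOM-specific result): the pattern is fixed with respect to the base, so
# for every collection i
#   b_T_p = b_T_h_i * h_T_c * c_T_p_i
# and therefore, for any pair of collections (i, j),
#   (b_T_h_j)^-1 * b_T_h_i * h_T_c = h_T_c * c_T_p_j * (c_T_p_i)^-1
# which is the classical A * X = X * B problem that cv2.calibrateHandEye solves.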

c_T_p_lst = [] # list of camera to pattern 4x4 transforms.
b_T_h_lst = [] # list of base to hand 4x4 transforms.

# Iterate all collections and create the lists of transforms c_T_p and b_T_h
# NOTE: calibration cannot run with a single collection; at least three are
# needed (a guard is sketched after this loop). Check
# https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga41b1a8dd70eae371eba707d101729c36
for collection_key, collection in dataset['collections'].items():

# Pattern not detected by sensor in collection
if not collection['labels'][args['pattern']][args['camera']]['detected']:
continue

@@ -178,23 +198,21 @@ def main():
# we will use the solvePnP function from opencv.
# This requires a np array of the pattern corners 3d coordinates
# and another np array of the corresponding 2d pixels.
# 3d coordinates were extracted previously with function getPatternConfig(),
# so now let's gather the corresponding 2d pixel coordinates.
pts_2d = np.ones((number_of_corners, 2), np.float32)
for idx, point in enumerate(collection['labels'][args['pattern']][args['camera']]['idxs']):
pts_2d[idx, 0] = point['x']
pts_2d[idx, 1] = point['y']

retval, rvec, tvec = cv2.solvePnP(pts_3d, pts_2d, K, D)

if not retval:
atomError('solvePnP failed.')

# Convert to 4x4 transform and add to list
c_T_p = traslationRodriguesToTransform(tvec, rvec)
c_T_p_lst.append(c_T_p)

# Visualize detections on image.
if args['show_images']:
image_gui = getCvImageFromCollectionSensor(
collection_key, args['camera'], dataset)
@@ -225,39 +243,35 @@
cv2.destroyWindow(window_name)

# ---------------------------------------
# Get b_T_h using direct kinematics
# ---------------------------------------
# We will use the atom builtin functions to get the transform directly
b_T_h = getTransform(from_frame=args['base_link'],
to_frame=args['hand_link'],
transforms=collection['transforms'])

b_T_h_lst.append(b_T_h)
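
# Editorial sketch (not part of the original script): calibrateHandEye needs at
# least three collections with pattern detections (see the NOTE above the loop),
# so it is worth aborting early if fewer were gathered.
if len(c_T_p_lst) < 3:
    atomError('Only ' + str(len(c_T_p_lst)) +
              ' collections with pattern detections were found. At least 3 are required.')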

# ---------------------------------------
# Run hand eye calibration
# ---------------------------------------

# We will use opencv's function calibrateHandEye()
# https://docs.opencv.org/4.5.4/d9/d0c/group__calib3d.html#gaebfc1c9f7434196a374c382abf43439b
#
# It requires a list of c_T_p and b_T_h transformations.
# Transformations are represented by separate rotation (3x3) and translation (3x1) components, so we will have two lists per transformation.
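# (Editorial note: in OpenCV's parameter naming, b_T_h corresponds to
# R_gripper2base / t_gripper2base, c_T_p to R_target2cam / t_target2cam, and the
# returned R_cam2gripper / t_cam2gripper is our h_T_c.)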

# NOTE instead of lists we could also use np arrays like this, same result.
# n = len(c_T_p_lst)
# c_T_p_Rs = np.zeros((n, 3, 3))
# c_T_p_ts = np.zeros((n, 3, 1))
# b_T_h_Rs = np.zeros((n, 3, 3))
# b_T_h_ts = np.zeros((n, 3, 1))

c_T_p_Rs = [] # list of rotations for c_T_p.
c_T_p_ts = [] # list of translations for c_T_p
b_T_h_Rs = [] # list of rotations for b_T_h
b_T_h_ts = [] # list of translations for b_T_h

for c_T_p, b_T_h in zip(c_T_p_lst, b_T_h_lst):
# --- c_T_p camera to pattern
c_T_p_t, c_T_p_R = matrixToTranslationRotation(c_T_p)
c_T_p_Rs.append(c_T_p_R)
@@ -271,19 +285,19 @@ def main():
# ---------------------------------------
# Run hand eye calibration using calibrateHandEye
# ---------------------------------------

h_T_c_R, h_T_c_t = cv2.calibrateHandEye(b_T_h_Rs,
b_T_h_ts,
c_T_p_Rs,
c_T_p_ts,
method=method)

# Rotations out of calibrateHandEye are 3x3
h_T_c = translationRotationToTransform(h_T_c_t, h_T_c_R)

# Extract the transformation marked for calibration, which is
# cp_T_cc, where cp is the calibration parent and cc the calibration child.
# So we need to get cp_T_cc from the estimated h_T_c.
# We use the following:
# h_T_c = h_T_cp * cp_T_cc * cc_T_c
# <=> cp_T_cc = (h_T_cp)-1 * h_T_c * (cc_T_c)-1

@@ -301,7 +315,7 @@ def main():
cp_T_cc = inv(h_T_cp) @ h_T_c @ inv(cc_T_c)

# Save to dataset
# Since the transformation cp_T_cc is static we will save the same transform to all collections
frame_key = generateKey(calibration_parent, calibration_child)

quat = tf.transformations.quaternion_from_matrix(cp_T_cc)
