some progress on #912, pick up at l.102
Kazadhum committed Mar 29, 2024
1 parent e740af9 commit fabd8e1
Showing 1 changed file with 37 additions and 36 deletions.
73 changes: 37 additions & 36 deletions atom_evaluation/scripts/other_calibrations/cv_handeye_calib.py
@@ -20,6 +20,7 @@
from atom_evaluation.utilities import atomicTfFromCalibration
from atom_core.atom import getTransform
from atom_core.dataset_io import saveAtomDataset
from atom_core.geometry import traslationRodriguesToTransform


def cvHandEyeCalibrate(objp, dataset, camera, pattern, number_of_corners):
@@ -67,50 +68,54 @@ def cvHandEyeCalibrate(objp, dataset, camera, pattern, number_of_corners):
undistorted_imgpoints_camera.append(tmp_undistorted_imgpoints_camera)
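# Editor's sketch (assumption): the undistorted points appended above presumably
# come from cv2.undistortPoints(); the input name below is illustrative, not from
# this file:
# tmp_undistorted_imgpoints_camera = cv2.undistortPoints(
#     tmp_imgpoints_camera, K, D, P=K)  # P=K -> pixel coordinates, distortion removed
# If the points were indeed undistorted with P=K, the solvePnP() call further down
# could pass zero distortion coefficients instead of D.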

#####################################
# Get transform from target to cam
#####################################

# Hard coded for now. First attempt, kept commented out: it read the base-to-cam
# transform (Z) straight from the dataset and stopped here:
# tmp_transforms = dataset['collections']['006']['transforms']
# base_T_cam = getTransform('base_link', 'rgb_world_link', tmp_transforms)
# print(base_T_cam)
# exit(0)
# NOTE: The documentation calls this the transform from world to cam because, in its eye-in-hand example, the pattern's frame is the world frame. That is not the case in ours, so when the documentation mentions the world-to-cam tf, it corresponds to our pattern/target-to-cam tf. (I think.)

#####################################
# Get transform from camera to pattern
# Use solvePnP() to get this transformation

print('Calculating transform from camera to pattern for collection 006....')

_, rvec, tvec = cv2.solvePnP(objp, undistorted_imgpoints_camera[0], K, D)

print(f'rvec = {rvec}')

print(f'tvec = {tvec}')

# Convert it into a homogeneous transform
cam_optical_frame_T_pattern = traslationRodriguesToTransform(tvec, rvec)

########################################################################
print(f'Transform from the camera\'s optical frame to the pattern frame = {cam_optical_frame_T_pattern}')
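# For reference, a sketch (editor's assumption) of what a translation+Rodrigues
# to homogeneous-transform conversion looks like; atom_core's helper may differ
# in detail:
# R_mat, _ = cv2.Rodrigues(rvec)  # 3x3 rotation matrix from the Rodrigues vector
# T = np.eye(4)
# T[0:3, 0:3] = R_mat
# T[0:3, 3] = tvec.flatten()      # translation of the pattern in the optical frame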

# Get tf from camera's optical frame to camera frame
tmp_transforms = dataset['collections']['006']['transforms']
cam_T_cam_optical_frame = getTransform('rgb_world_link', 'rgb_world_optical_frame', tmp_transforms)
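# For reference (editor's note): with the usual ROS frame conventions (body frame
# x forward / y left / z up, optical frame z forward / x right / y down) this
# transform should be a pure rotation close to:
# expected_R = np.array([[0, 0, 1],
#                        [-1, 0, 0],
#                        [0, -1, 0]])  # columns: optical x, y, z axes in body coords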

# Leftover from the stereo-calibration script this file started from, kept
# commented out (K_l, D_l, K_r, D_r, R, T, E, F are not defined in this routine):
# print('\n---------------------\n Starting stereo calibration ...')
# # Extrinsic stereo calibration
# stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
#                         cv2.TERM_CRITERIA_EPS, 100, 1e-5)
# flags = (cv2.CALIB_USE_INTRINSIC_GUESS)
# ret, K_l, D_l, K_r, D_r, R, T, E, F = cv2.stereoCalibrate(objpoints, imgpoints_l,
#                                                           imgpoints_r, K_l, D_l, K_r,
#                                                           D_r, image_size,
#                                                           criteria=stereocalib_criteria, flags=flags)
# print('\n---------------------\n Done!\n\n------\nCalibration results:\n------\n')
# print('K_left', K_l)
# print('D_left', D_l)
# print('K_right', K_r)
# print('D_right', D_r)
# print('R', R)
# print('T', T)
# print('E', E)
# print('F', F)
# camera_model = dict([('K_l', K_l), ('K_r', K_r), ('D_l', D_l),
#                      ('D_r', D_r), ('R', R), ('T', T),
#                      ('E', E), ('F', F)])
# cv2.destroyAllWindows()
# return camera_model

print(f'Transform from cam to cam optical = {cam_T_cam_optical_frame}')

cam_T_pattern = np.dot(cam_T_cam_optical_frame, cam_optical_frame_T_pattern)

print(f'Transform from camera to pattern = {cam_T_pattern}')

# I think I need to invert the transform matrix
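# Editor's sketch of the inversion mentioned above: for a homogeneous transform
# T = [R | t], the inverse is [R.T | -R.T @ t], which avoids a generic
# np.linalg.inv() call:
pattern_T_cam = np.eye(4)
pattern_T_cam[0:3, 0:3] = cam_T_pattern[0:3, 0:3].T
pattern_T_cam[0:3, 3] = -pattern_T_cam[0:3, 0:3] @ cam_T_pattern[0:3, 3]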
################################################
# Get transform from base to gripper
################################################

# Hard coded for now, testing w/ one collection before iterating through all collections
base_T_gripper = getTransform('base_link', 'flange', tmp_transforms)
# print(base_T_gripper)
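# Sketch (editor's assumption) of the iteration hinted at above, accumulating the
# per-collection rotation/translation lists that OpenCV's hand-eye routines expect;
# kept commented out while the single-collection test stands:
# R_base2gripper, t_base2gripper = [], []
# for collection_key, collection in dataset['collections'].items():
#     T = getTransform('base_link', 'flange', collection['transforms'])
#     R_base2gripper.append(T[0:3, 0:3])
#     t_base2gripper.append(T[0:3, 3].reshape(3, 1))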

################################################
# Calibrate
################################################
exit(0)

cv2.calibrateRobotWorldHandEye()
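# Sketch (editor's assumption) of the eventual call, following the OpenCV
# signature: per-collection world(pattern)-to-cam and base-to-gripper
# rotations/translations go in, base-to-world and gripper-to-cam come out:
# R_base2world, t_base2world, R_gripper2cam, t_gripper2cam = \
#     cv2.calibrateRobotWorldHandEye(R_world2cam, t_world2cam,
#                                    R_base2gripper, t_base2gripper)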


def getPatternConfig(dataset, pattern):
Expand Down Expand Up @@ -154,10 +159,6 @@ def main():

# Remove partial detections (OpenCV does not support them)
collections_to_delete = []

# TODO: only worked for the first pattern; changed it to calibrate w.r.t. a pattern passed as an argument:
# first_pattern_key = list(dataset['calibration_config']['calibration_patterns'].keys())[0]

number_of_corners = int(nx) * int(ny)
for collection_key, collection in dataset['collections'].items():
