From 4976d11a4dc126facb033a27514cd86acfa312d8 Mon Sep 17 00:00:00 2001 From: Miguel Oliveira Date: Sat, 4 May 2024 18:24:58 +0000 Subject: [PATCH] #912 fix --- atom_core/src/atom_core/geometry.py | 34 ++ .../other_calibrations/cv_eye_in_hand.py | 367 ++++++++++++++ .../other_calibrations/cv_handeye_calib.py | 452 +++++++++-------- .../other_calibrations/test_cv_hand_eye.py | 17 + .../rihbot_calibration/calibration/config.yml | 2 +- .../calibration/summary.pdf | Bin 12583 -> 12578 bytes .../launch/collect_data.launch | 4 +- .../rihbot_calibration/launch/playbag.launch | 4 +- .../launch/set_initial_estimate.launch | 4 +- .../rihbot_calibration/rviz/collect_data.rviz | 459 ++++++++++++------ 10 files changed, 994 insertions(+), 349 deletions(-) create mode 100644 atom_evaluation/scripts/other_calibrations/cv_eye_in_hand.py create mode 100644 atom_evaluation/scripts/other_calibrations/test_cv_hand_eye.py diff --git a/atom_core/src/atom_core/geometry.py b/atom_core/src/atom_core/geometry.py index a486022b..5db0aec9 100644 --- a/atom_core/src/atom_core/geometry.py +++ b/atom_core/src/atom_core/geometry.py @@ -98,6 +98,40 @@ def rodriguesToMatrix(r): return matrix[0] +def matrixToTranslationRotation(matrix): + + R = matrix[0:3, 0:3] + + tvec = np.zeros((3, 1), dtype=float) + tvec[0, 0] = matrix[0, 3] + tvec[1, 0] = matrix[1, 3] + tvec[2, 0] = matrix[2, 3] + + return tvec, R + + +def matrixToTranslationRodrigues(matrix): + + rvec = matrixToRodrigues(matrix[0:3, 0:3]) + + tvec = np.zeros((1, 3), dtype=float) + tvec[0, 0] = matrix[0, 3] + tvec[0, 1] = matrix[1, 3] + tvec[0, 2] = matrix[2, 3] + + return tvec, rvec + + +def translationRotationToTransform(translation, rotation): + T = np.zeros((4, 4), dtype=float) + T[0:3, 0:3] = rotation + T[0, 3] = translation[0, 0] + T[1, 3] = translation[1, 0] + T[2, 3] = translation[2, 0] + T[3, 3] = 1 + return T + + def traslationRodriguesToTransform(translation, rodrigues): R = rodriguesToMatrix(rodrigues) T = np.zeros((4, 4), dtype=float) diff --git a/atom_evaluation/scripts/other_calibrations/cv_eye_in_hand.py b/atom_evaluation/scripts/other_calibrations/cv_eye_in_hand.py new file mode 100644 index 00000000..84b82111 --- /dev/null +++ b/atom_evaluation/scripts/other_calibrations/cv_eye_in_hand.py @@ -0,0 +1,367 @@ +#!/usr/bin/env python3 + +""" +Stereo calibration from opencv +""" + +# ------------------------------------------------------------------------------- +# --- IMPORTS +# ------------------------------------------------------------------------------- + +from copy import deepcopy +import math +from colorama import Fore +import numpy as np +import cv2 +import argparse +import os +from numpy.linalg import inv + +from matplotlib import cm +from prettytable import PrettyTable +import tf + +# from atom.atom_core.src.atom_core.geometry import matrixToTranslationRodrigues +from atom_calibration.calibration.visualization import getCvImageFromCollectionSensor +from atom_core.atom import getTransform +from atom_core.dataset_io import filterCollectionsFromDataset, loadResultsJSON, saveAtomDataset +from atom_core.geometry import matrixToTranslationRotation, translationRotationToTransform, traslationRodriguesToTransform +from atom_core.naming import generateKey +from atom_core.transformations import compareTransforms +from atom_core.utilities import atomError, compareAtomTransforms, getNumberQualifier, printComparisonToGroundTruth +from atom_core.vision import projectToCamera +from atom_core.drawing import drawCross2D + + +from colorama import init as 
colorama_init +colorama_init(autoreset=True) + +np.set_printoptions(precision=3, suppress=True) + +# ------------------------------------------------------------------------------- +# FUNCTIONS +# ------------------------------------------------------------------------------- + + +def main(): + + # --------------------------------------- + # Command line arguments + # --------------------------------------- + ap = argparse.ArgumentParser() + ap.add_argument("-json", "--json_file", help="Json file containing train input dataset.", type=str, + required=True) + ap.add_argument("-c", "--camera", help="Camera sensor name.", + type=str, required=True) + ap.add_argument("-p", "--pattern", + help="Pattern to be used for calibration.", type=str, required=True) + ap.add_argument("-hl", "--hand_link", + help="Name of coordinate frame of the hand.", type=str, required=True) + ap.add_argument("-bl", "--base_link", + help="Name of coordinate frame of the robot's base.", type=str, required=True) + ap.add_argument("-ctgt", "--compare_to_ground_truth", + help="If the system being calibrated is simulated, directly compare the TFs to the ground truth.", action="store_true") + ap.add_argument("-csf", "--collection_selection_function", default=None, type=str, + help="A string to be evaluated into a lambda function that receives a collection name as input and " + "returns True or False to indicate if the collection should be loaded (and used in the " + "optimization). The Syntax is lambda name: f(x), where f(x) is the function in python " + "language. Example: lambda name: int(name) > 5 , to load only collections 6, 7, and onward.") + ap.add_argument("-uic", "--use_incomplete_collections", action="store_true", default=False, + help="Remove any collection which does not have a detection for all sensors.", ) + ap.add_argument("-si", "--show_images", action="store_true", + default=False, help="shows images for each camera") + ap.add_argument("-mn", "--method_name", required=False, default='tsai', + help="Hand eye method. One of ['tsai', 'park', 'horaud', 'andreff', 'daniilidis'].", type=str) + + args = vars(ap.parse_args()) + # camera = args['camera'] + # pattern = args['pattern'] + + # --------------------------------------- + # Dataset loading and preprocessing + # --------------------------------------- + dataset, _ = loadResultsJSON(args["json_file"], + args["collection_selection_function"]) + + args['remove_partial_detections'] = True # because opencv + dataset = filterCollectionsFromDataset(dataset, args) + + dataset_ground_truth = deepcopy(dataset) # make a copy before adding noise + dataset_initial = deepcopy(dataset) # store initial values + + # --------------------------------------- + # Verifications + # --------------------------------------- + + # Check that the camera has rgb modality + if not dataset['sensors'][args['camera']]['modality'] == 'rgb': + atomError('Sensor ' + args['camera'] + ' is not of rgb modality.') + + available_methods = ['tsai', 'park', 'horaud', 'andreff', 'daniilidis'] + if args['method_name'] not in available_methods: + atomError('Unknown method. 
Select one from ' + str(available_methods))
+
+    if args['method_name'] == 'tsai':
+        method = cv2.CALIB_HAND_EYE_TSAI
+    elif args['method_name'] == 'park':
+        method = cv2.CALIB_HAND_EYE_PARK
+    elif args['method_name'] == 'horaud':
+        method = cv2.CALIB_HAND_EYE_HORAUD
+    elif args['method_name'] == 'andreff':
+        method = cv2.CALIB_HAND_EYE_ANDREFF
+    elif args['method_name'] == 'daniilidis':
+        method = cv2.CALIB_HAND_EYE_DANIILIDIS
+
+    # TODO: Check the given hand link is in the chain from base to camera, without any non-static transforms.
+    # Abort if so.
+
+    # ---------------------------------------
+    # --- Define selected collection key.
+    # ---------------------------------------
+    # For the getters we only need to get one collection because optimized transformations are static, which means they are the same for all collections. Let's select the first key in the dictionary and always get that transformation.
+    selected_collection_key = list(dataset["collections"].keys())[0]
+    print("Selected collection key is " + str(selected_collection_key))
+
+    # ---------------------------------------
+    # Pattern configuration
+    # ---------------------------------------
+    nx = dataset['calibration_config']['calibration_patterns'][args['pattern']
+                                                               ]['dimension']['x']
+    ny = dataset['calibration_config']['calibration_patterns'][args['pattern']
+                                                               ]['dimension']['y']
+    square = dataset['calibration_config']['calibration_patterns'][args['pattern']]['size']
+    pts_3d = np.zeros((nx * ny, 3), np.float32)
+    # set of coordinates (w.r.t. the pattern frame) of the corners
+    pts_3d[:, :2] = square * np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
+    number_of_corners = int(nx) * int(ny)
+
+    # ---------------------------------------
+    # --- Get intrinsic data for the sensor
+    # ---------------------------------------
+    # Source sensor
+    K = np.zeros((3, 3), np.float32)
+    D = np.zeros((5, 1), np.float32)
+    K[0, :] = dataset['sensors'][args['camera']]['camera_info']['K'][0:3]
+    K[1, :] = dataset['sensors'][args['camera']]['camera_info']['K'][3:6]
+    K[2, :] = dataset['sensors'][args['camera']]['camera_info']['K'][6:9]
+    D[:, 0] = dataset['sensors'][args['camera']]['camera_info']['D'][0:5]
+
+    # height = dataset['sensors'][args['camera']]['camera_info']['height']
+    # width = dataset['sensors'][args['camera']]['camera_info']['width']
+
+    # ---------------------------------------
+    # --- Create lists of transformations for hand eye calibration
+    # ---------------------------------------
+    # Hand eye calibration (in an eye-in-hand configuration) has 4 different transformations.
+    #
+    # robot-base to end-effector   b_T_e   (obtained from direct kinematics)
+    # camera to pattern            c_T_p   (obtained through the detection of the known pattern)
+    # end-effector to camera       e_T_c   (estimated by the calibration)
+    # robot-base to pattern        b_T_p   (estimated by the calibration)
+    # We will use this notation throughout the code.
+
+    c_T_p_lst = []  # list of camera to pattern 4x4 transforms.
+    b_T_e_lst = []  # list of base to end-effector 4x4 transforms.
+
+    # Iterate all collections and create the lists of transforms c_T_p and b_T_e
+    # NOTE: cannot test with a single collection. We need at least three. 
Check + # https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga41b1a8dd70eae371eba707d101729c36 + for collection_key, collection in dataset['collections'].items(): + + # pattern not detected by sensor in collection + if not collection['labels'][args['pattern']][args['camera']]['detected']: + continue + + # --------------------------------------- + # Get c_T_p using pattern detection + # --------------------------------------- + # we will us the solve pnp function from opencv. + # This requires a np array of the pattern corners 3d coordinates + # and another np array of the correponding 2d pixels. + # 3d coordinates were extracted previously with function getPatternConfig() + + pts_2d = np.ones((number_of_corners, 2), np.float32) + for idx, point in enumerate(collection['labels'][args['pattern']][args['camera']]['idxs']): + pts_2d[idx, 0] = point['x'] + pts_2d[idx, 1] = point['y'] + + retval, rvec, tvec = cv2.solvePnP(pts_3d, pts_2d, K, D) + + # retval, rvec, tvec = cv2.solvePnP(pts_3d, corners, K, D) + if not retval: + raise ValueError('solvePnP failed.') + + # Convert to 4x4 transform and add to list + c_T_p = traslationRodriguesToTransform(tvec, rvec) + c_T_p_lst.append(c_T_p) + + if args['show_images']: + image_gui = getCvImageFromCollectionSensor( + collection_key, 'rgb_hand', dataset) + + # Four 3d points: origin + axis tips + axis_in_pattern = np.float32([[0, 0, 0], + [0.2, 0, 0], + [0, 0.2, 0], + [0, 0, 0.2]]) + + # Project to camera + axis_in_camera, _ = cv2.projectPoints(axis_in_pattern, rvec, tvec, + K, D) + + # draw x axes + origin = tuple(axis_in_camera[0][0].astype(int)) + x_tip = tuple(axis_in_camera[1][0].astype(int)) + y_tip = tuple(axis_in_camera[2][0].astype(int)) + z_tip = tuple(axis_in_camera[3][0].astype(int)) + image_gui = cv2.line(image_gui, origin, x_tip, + (0, 0, 255), 5) # opencv is BGR + image_gui = cv2.line(image_gui, origin, y_tip, (0, 255, 0), 5) + image_gui = cv2.line(image_gui, origin, z_tip, (255, 0, 0), 5) + + window_name = 'Collection ' + collection_key + cv2.imshow(window_name, image_gui) + cv2.waitKey(0) + cv2.destroyWindow(window_name) + + # --------------------------------------- + # Get b_T_e using direct kinematics + # --------------------------------------- + # We will use the atom builtin functions to get the transform directly + b_T_h = getTransform(from_frame=args['base_link'], + to_frame=args['hand_link'], + transforms=collection['transforms']) + + b_T_e_lst.append(b_T_h) + + # --------------------------------------- + # Run hand eye calibration + # --------------------------------------- + + # We will use opencv's function calibrateRobotWorldHandEye() + # https://docs.opencv.org/4.5.4/d9/d0c/group__calib3d.html#gaebfc1c9f7434196a374c382abf43439b + # + # It requires a list of c_T_p and b_T_e transformations. + # Transformations are represented by separate rotation (3x3) and translation (3x1) components, so we will have two lists per transformations. + + # NOTE instead of lists we could also use np arrays like this, same result. + # n = len(c_T_p_lst) + # c_T_p_Rs = np.zeros((n, 3, 3)) + # c_T_p_ts = np.zeros((n, 3, 1)) + # b_T_e_Rs = np.zeros((n, 3, 3)) + # b_T_e_ts = np.zeros((n, 3, 1)) + + c_T_p_Rs = [] # list of rotations for c_T_p. 
+ c_T_p_ts = [] # list of translations for c_T_p + b_T_h_Rs = [] # list of rotations for b_T_e + b_T_h_ts = [] # list of translations for b_T_e + + for c_T_p, b_T_h in zip(c_T_p_lst, b_T_e_lst): + + # --- c_T_p camera to pattern + c_T_p_t, c_T_p_R = matrixToTranslationRotation(c_T_p) + c_T_p_Rs.append(c_T_p_R) + c_T_p_ts.append(c_T_p_t) + + # --- b_T_h base to hand + b_T_h_t, b_T_h_R = matrixToTranslationRotation(b_T_h) + b_T_h_Rs.append(b_T_h_R) + b_T_h_ts.append(b_T_h_t) + + # --------------------------------------- + # Run hand eye calibration using calibrateHandEye + # --------------------------------------- + + result = cv2.calibrateHandEye(b_T_h_Rs, + b_T_h_ts, + c_T_p_Rs, + c_T_p_ts, + method=method) + h_T_c_R, h_T_c_t = result + + # Rotations out of calibrateRobotWorldHandEye are 3x3 + h_T_c = translationRotationToTransform(h_T_c_t, h_T_c_R) + + # Extract the transformation marked for calibration, + # we assume calibration parent (cp) and calibration child (cc) and use: + # h_T_c = h_T_cp * cp_T_cc * cc_T_c + # <=> cp_T_cc = (h_T_cp)-1 * h_T_c * (cc_T_c)-1 + + calibration_parent = dataset['calibration_config']['sensors'][args['camera']]['parent_link'] + calibration_child = dataset['calibration_config']['sensors'][args['camera']]['child_link'] + + h_T_cp = getTransform(from_frame=args['hand_link'], + to_frame=calibration_parent, + transforms=dataset['collections'][selected_collection_key]['transforms']) + + cc_T_c = getTransform(from_frame=calibration_child, + to_frame=dataset['calibration_config']['sensors'][args['camera']]['link'], + transforms=dataset['collections'][selected_collection_key]['transforms']) + + cp_T_cc = inv(h_T_cp) @ h_T_c @ inv(cc_T_c) + + # Save to dataset + # Since the transformation cp_T_cc is static we will save to all collections + frame_key = generateKey(calibration_parent, calibration_child) + + quat = tf.transformations.quaternion_from_matrix(cp_T_cc) + trans = cp_T_cc[0:3, 3].tolist() + for collection_key, collection in dataset['collections'].items(): + dataset['collections'][collection_key]['transforms'][frame_key]['quat'] = quat + dataset['collections'][collection_key]['transforms'][frame_key]['trans'] = trans + + if args['compare_to_ground_truth']: + + # -------------------------------------------------- + # Compare h_T_c hand to camera transform to ground truth + # -------------------------------------------------- + h_T_c_ground_truth = getTransform(from_frame=args['hand_link'], + to_frame=dataset['calibration_config']['sensors'][args['camera']]['link'], + transforms=dataset['collections'][selected_collection_key]['transforms']) + print(Fore.GREEN + 'Ground Truth h_T_c=\n' + str(h_T_c_ground_truth)) + + print('estimated h_T_c=\n' + str(h_T_c)) + + translation_error, rotation_error, _, _, _, _, _, _ = compareTransforms( + h_T_c, h_T_c_ground_truth) + print('Etrans = ' + str(round(translation_error*1000, 3)) + ' (m)') + print('Erot = ' + str(round(rotation_error*180/math.pi, 3)) + ' (deg)') + + # -------------------------------------------------- + # Compare cp_T_cc calibration_parent_T_calibration_child to ground truth + # -------------------------------------------------- + for sensor_key, sensor in dataset["sensors"].items(): + header = ['Transform', 'Description', 'Et0 [m]', + 'Et [m]', 'Rrot0 [rad]', 'Erot [rad]'] + table = PrettyTable(header) + + transform_key = generateKey( + sensor["calibration_parent"], sensor["calibration_child"]) + row = [transform_key, Fore.BLUE + sensor_key] + + transform_calibrated = 
dataset['collections'][selected_collection_key]['transforms'][transform_key] + transform_ground_truth = dataset_ground_truth['collections'][ + selected_collection_key]['transforms'][transform_key] + transform_initial = dataset_initial['collections'][selected_collection_key]['transforms'][transform_key] + + translation_error_1, rotation_error_1 = compareAtomTransforms( + transform_initial, transform_ground_truth) + translation_error_2, rotation_error_2 = compareAtomTransforms( + transform_calibrated, transform_ground_truth) + + row.append(round(translation_error_1, 6)) + row.append(round(translation_error_2, 6)) + row.append(round(rotation_error_1, 6)) + row.append(round(rotation_error_2, 6)) + + table.add_row(row) + print(table) + + # Save results to an atom dataset + filename_results_json = os.path.dirname( + args['json_file']) + '/hand_eye_' + args['method_name'] + '_' + args['camera'] + '.json' + saveAtomDataset(filename_results_json, dataset) + + +if __name__ == '__main__': + main() diff --git a/atom_evaluation/scripts/other_calibrations/cv_handeye_calib.py b/atom_evaluation/scripts/other_calibrations/cv_handeye_calib.py index 7a22325a..c160b263 100755 --- a/atom_evaluation/scripts/other_calibrations/cv_handeye_calib.py +++ b/atom_evaluation/scripts/other_calibrations/cv_handeye_calib.py @@ -8,11 +8,13 @@ # --- IMPORTS # ------------------------------------------------------------------------------- +from copy import deepcopy import numpy as np import cv2 import argparse import json import os +from atom_core.transformations import compareTransforms import tf from colorama import Style, Fore @@ -22,7 +24,7 @@ from atom_evaluation.utilities import atomicTfFromCalibration from atom_core.atom import getTransform from atom_core.dataset_io import saveAtomDataset -from atom_core.geometry import traslationRodriguesToTransform +from atom_core.geometry import invertTranslationRodrigues, matrixToRodrigues, traslationRodriguesToTransform from atom_core.vision import projectToCamera from atom_core.drawing import drawCross2D, drawSquare2D # from atom_evaluation.single_rgb_evaluation import getPointsDetectedInImageAsNPArray, getPointsInPatternAsNPArray @@ -145,7 +147,8 @@ def getPointsInPatternAsNPArray(_collection_key, _pattern_key, _sensor_key, _dat pts_in_pattern_list = [] # collect the points for pt_detected in _dataset['collections'][_collection_key]['labels'][_pattern_key][_sensor_key]['idxs']: id_detected = pt_detected['id'] - point = [item for item in _dataset['patterns'][_pattern_key]['corners'] if item['id'] == id_detected][0] + point = [item for item in _dataset['patterns'][_pattern_key] + ['corners'] if item['id'] == id_detected][0] pts_in_pattern_list.append(point) return np.array([[item['x'] for item in pts_in_pattern_list], # convert list to np array @@ -160,145 +163,24 @@ def getPointsDetectedInImageAsNPArray(_collection_key, _pattern_key, _sensor_key [item['y'] for item in _dataset['collections'][_collection_key]['labels'][_pattern_key][_sensor_key]['idxs']]], dtype=float) + def splitTFMatrix(matrix): - #TODO: write a verification of matrix size - + # TODO: write a verification of matrix size + R = matrix[0:3, 0:3] t = (matrix[0:3, 3]).T - return R,t + return R, t + -def joinTFMatrix(R,t): - matrix = np.zeros((4,4)) +def joinTFMatrix(R, t): + matrix = np.zeros((4, 4)) matrix[0:3, 0:3] = R matrix[0:3, 3] = t.T matrix[3, 3] = 1 return matrix -def cvHandEyeCalibrate(objp, dataset, camera, pattern, number_of_corners): - - # --------------------------------------- - # --- Get intrinsic 
data for the sensor - # --------------------------------------- - # Source sensor - K = np.zeros((3, 3), np.float32) - D = np.zeros((5, 1), np.float32) - K[0, :] = dataset['sensors'][camera]['camera_info']['K'][0:3] - K[1, :] = dataset['sensors'][camera]['camera_info']['K'][3:6] - K[2, :] = dataset['sensors'][camera]['camera_info']['K'][6:9] - D[:, 0] = dataset['sensors'][camera]['camera_info']['D'][0:5] - - height = dataset['sensors'][camera]['camera_info']['height'] - width = dataset['sensors'][camera]['camera_info']['width'] - image_size = (height, width) - - # iterate all collections and create four lists of rotations and translations: - list_cam_optical_frame_T_pattern_R = [] - list_cam_optical_frame_T_pattern_t = [] - list_base_T_gripper_R = [] - list_base_T_gripper_t = [] - - for collection_key, collection in dataset['collections'].items(): - - # --------------------------------------- - # Get transform from target to cam - # --------------------------------------- - - # Arrays to store object points and image points from all the images. - objpoints = [] # 3d point in real world space - imgpoints_camera = [] # 2d points in image plane. - - tmp_imgpoints_camera = np.ones((number_of_corners, 2), np.float32) # temporary var - - - #NOTE: Check labels id (this works because detections are complete) - for idx, point in enumerate(collection['labels'][pattern][camera]['idxs']): - tmp_imgpoints_camera[idx, 0] = point['x'] - tmp_imgpoints_camera[idx, 1] = point['y'] - - imgpoints_camera.append(tmp_imgpoints_camera) - objpoints.append(objp) # Use the points defined in local pattern ref frame - # print('objp=\n'+str(objp)) - - # print(imgpoints_camera) - - # --------------------------------------- - # Get transform from target to cam - # --------------------------------------- - # NOTE: The documentation calls this the transform from world to cam because in their example (which is eye-in-hand), their pattern's frame is their world frame. This is not the case in ours. So when the documentation mentions the world to cam tf, it corresponds to our pattern/target to camera tf. (I think.) - - # Use solvePnP() to get this transformation - # Need to check if these calculations (from here to l. 108) are correct - - print('Calculating transform from camera to pattern for collection ' + collection_key) - - print('K = ' + str(K)) - - print('D = ' + str(D)) - - retval, rvec, tvec = cv2.solvePnP(objp, imgpoints_camera[0], K, D) - - print('SolvePnP retval = ' + str(retval)) - - # print(f'rvec = {rvec}') - # print(f'tvec = {tvec}') - - # Convert it into an homogenous transform - cam_optical_frame_T_pattern = traslationRodriguesToTransform(tvec, rvec) - # cam_optical_frame_T_pattern = np.linalg.inv(cam_optical_frame_T_pattern) - - - print('Optical frame to pattern (from solvePnP) (Collection ' + collection_key + ')') - print(cam_optical_frame_T_pattern) - - # Split into R and t matrices for the calibration function - R, t = splitTFMatrix(cam_optical_frame_T_pattern) - list_cam_optical_frame_T_pattern_R.append(R) - list_cam_optical_frame_T_pattern_t.append(t) - - # --------------------------------------- - # Get transform from base to gripper - # --------------------------------------- - - # NOTE: cannot test with a single collection. We need at least three. 
Check - # https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga41b1a8dd70eae371eba707d101729c36 - - print('Calculating transform from the base to the gripper for collection ' + collection_key) - - base_T_gripper = getTransform( - from_frame='base_link', - to_frame='flange', - transforms=collection['transforms'] - ) - - # base_T_gripper = np.linalg.inv(base_T_gripper) - print('Base to gripper (from dataset) (Collection ' + collection_key + ')') - print(base_T_gripper) - - # Split into R and t matrices for the calibration function - R,t = splitTFMatrix(base_T_gripper) - list_base_T_gripper_R.append(R) - list_base_T_gripper_t.append(t) - - # --------------------------------------- - # Calibrate - # --------------------------------------- - - # print('list_cam_optical_frame_T_pattern_R = \n' + str(list_cam_optical_frame_T_pattern_R)) - - print('list_cam_optical_frame_T_pattern_t = \n' + str(list_cam_optical_frame_T_pattern_t)) - - # Running with lists of np.arrays - R_base2pattern, t_base2pattern, R_gripper2opticalframe, t_gripper2opticalframe = cv2.calibrateRobotWorldHandEye(list_cam_optical_frame_T_pattern_R, - list_cam_optical_frame_T_pattern_t, - list_base_T_gripper_R, list_base_T_gripper_t - ) - - # print('Testing...') - # print( R_base2pattern, t_base2pattern, R_gripper2cam, t_gripper2cam) - - return R_base2pattern, t_base2pattern, R_gripper2opticalframe, t_gripper2opticalframe def getPatternConfig(dataset, pattern): # Pattern configs @@ -312,73 +194,93 @@ def getPatternConfig(dataset, pattern): return nx, ny, square, inner_square, objp + def getWantedTransformsFromOpenCVHandEyeCalib(dataset, calib_tf_base2pattern, calib_tf_gripper2opticalframe, base_link_name, optical_frame_name, sensor_link_name): world_link = dataset['calibration_config']['world_link'] for collection_key, collection in dataset['collections'].items(): - print("Getting wanted transforms for collection " + collection_key + "...") + print("Getting wanted transforms for collection " + collection_key + "...") - tfs = collection['transforms'] # Dict of tfs from a given collection + tfs = collection['transforms'] # Dict of tfs from a given collection # Getting the world to base transform (bTw) - + print("Getting world to base transform...") tf_world2base = getTransform(world_link, base_link_name, tfs) - tf_world2pattern = np.dot(calib_tf_base2pattern, tf_world2base) # First wanted calib + tf_world2pattern = np.dot( + calib_tf_base2pattern, tf_world2base) # First wanted calib # Get the second transform (gripper to cam) print("Getting gripper to cam transform...") - tf_opticalframe2cam = getTransform("rgb_hand_optical_frame", "rgb_hand_link", tfs) + tf_opticalframe2cam = getTransform( + "rgb_hand_optical_frame", "rgb_hand_link", tfs) # print(tf_opticalframe2cam) # DEBUG - tf_gripper2cam = np.dot(tf_opticalframe2cam, calib_tf_gripper2opticalframe) + tf_gripper2cam = np.dot(tf_opticalframe2cam, + calib_tf_gripper2opticalframe) # print(tf_gripper2cam) return tf_world2pattern, tf_gripper2cam + def calc_tf_opticalframe2pattern(collection_tfs, calib_tf_base2pattern, calib_tf_gripper2opticalframe, base_link_name, gripper_link_name): tf_gripper2base = getTransform(from_frame=gripper_link_name, - to_frame=base_link_name, - transforms=collection_tfs) + to_frame=base_link_name, + transforms=collection_tfs) - calib_tf_opticalframe2gripper = np.linalg.inv(calib_tf_gripper2opticalframe) + calib_tf_opticalframe2gripper = np.linalg.inv( + calib_tf_gripper2opticalframe) calib_tf_opticalframe2pattern = calib_tf_base2pattern @ tf_gripper2base 
@ calib_tf_opticalframe2gripper # print(calib_tf_opticalframe2pattern) # DEBUG return calib_tf_opticalframe2pattern - + def main(): + + # --------------------------------------- + # Command line arguments + # --------------------------------------- ap = argparse.ArgumentParser() ap.add_argument("-json", "--json_file", help="Json file containing train input dataset.", type=str, required=True) - ap.add_argument("-c", "--camera", help="Camera sensor name.", type=str, required=True) - ap.add_argument("-si", "--show_images", help="If true the script shows images.", action='store_true', default=False) - ap.add_argument("-p", "--pattern", help="Pattern to be used for calibration.", type=str, required=True) - ap.add_argument("-ctgt", "--compare_to_ground_truth", help="If the system being calibrated is simulated, directly compare the TFs to the ground truth.", action="store_true") + ap.add_argument("-c", "--camera", help="Camera sensor name.", + type=str, required=True) + ap.add_argument("-si", "--show_images", help="If true the script shows images.", + action='store_true', default=False) + ap.add_argument("-p", "--pattern", + help="Pattern to be used for calibration.", type=str, required=True) + ap.add_argument("-ctgt", "--compare_to_ground_truth", + help="If the system being calibrated is simulated, directly compare the TFs to the ground truth.", action="store_true") # Save args + + # NOTE it does not make sense to create new variables like the ones in the vars dictionary. + # Might as well use the args directly, it becomes less confusing. args = vars(ap.parse_args()) json_file = args['json_file'] camera = args['camera'] show_images = args['show_images'] pattern = args['pattern'] - # Read json file - f = open(json_file, 'r') - dataset = json.load(f) + hand_link = 'flange' + # hand_link = 'rgb_hand_link' + # --------------------------------------- + # Dataset loading and preprocessing + # --------------------------------------- + # Read json file + dataset = json.load(open(json_file, 'r')) - ######################################### - # DATASET PREPROCESSING - ######################################### + # Get pattern configuration to check if number of detections == nx * ny + nx, ny, _, _, objp = getPatternConfig(dataset=dataset, pattern=pattern) - nx, ny, square, inner_square, objp = getPatternConfig(dataset=dataset, pattern=pattern) + # NOTE why not use the atom functions directly, instead of putting the for loops here? # Remove partial detections (OpenCV does not support them) collections_to_delete = [] @@ -388,7 +290,7 @@ def main(): for sensor_key, sensor in dataset['sensors'].items(): if sensor_key != camera: continue - + if sensor['msg_type'] == 'Image' and collection['labels'][pattern][sensor_key]['detected']: if not len(collection['labels'][pattern][sensor_key]['idxs']) == number_of_corners: print( @@ -405,7 +307,8 @@ def main(): collections_to_delete = [] for collection_key, collection in dataset['collections'].items(): if not collection['labels'][pattern][args['camera']]['detected']: - print('Collection ' + collection_key + ' pattern not detected on camera. Removing...') + print('Collection ' + collection_key + + ' pattern not detected on camera. Removing...') collections_to_delete.append(collection_key) for collection_key in collections_to_delete: @@ -419,37 +322,186 @@ def main(): # For the getters we only need to get one collection because optimized transformations are static, which means they are the same for all collections. 
Let's select the first key in the dictionary and always get that transformation. selected_collection_key = list(dataset["collections"].keys())[0] print("Selected collection key is " + str(selected_collection_key)) - - - # Compute OpenCV stereo calibration - # Now does not crash!!! - R_base2pattern, t_base2pattern, R_gripper2opticalframe, t_gripper2opticalframe = cvHandEyeCalibrate( - objp=objp, - dataset=dataset, - camera=camera, - pattern=pattern, - number_of_corners=number_of_corners - ) + + # --------------------------------------- + # --- Get intrinsic data for the sensor + # --------------------------------------- + # Source sensor + K = np.zeros((3, 3), np.float32) + D = np.zeros((5, 1), np.float32) + K[0, :] = dataset['sensors'][camera]['camera_info']['K'][0:3] + K[1, :] = dataset['sensors'][camera]['camera_info']['K'][3:6] + K[2, :] = dataset['sensors'][camera]['camera_info']['K'][6:9] + D[:, 0] = dataset['sensors'][camera]['camera_info']['D'][0:5] + + height = dataset['sensors'][camera]['camera_info']['height'] + width = dataset['sensors'][camera]['camera_info']['width'] + image_size = (height, width) + + # --------------------------------------- + # --- Create lists of transformations for hand eye calibration + # --------------------------------------- + + # Hand eye calibration (in a eye-in-hand configuration) has 4 different transformations. + # + # robot-base to end-effector b_T_e (obtained from direct kinematics) + # camera to pattern c_t_p (obtained through the detection of the known pattern) + # end-effector to camera e_T_c (estimated by the calibration) + # robot-base to pattern b_T_p (estimated by the calibration) + # We will use this notation throughout the code. + + # We will use opencv's function calibrateRobotWorldHandEye() + # https://docs.opencv.org/4.5.4/d9/d0c/group__calib3d.html#gaebfc1c9f7434196a374c382abf43439b + # + # It requires a list of c_T_p and b_T_e transformations. + # Transformations are represented by separate rotation and translation components, so we will have two lists per transformations. + + c_T_p_rvecs = [] # list of rotations for c_T_p + c_T_p_tvecs = [] # list of translations for c_T_p + b_T_e_rvecs = [] # list of rotations for b_T_e + b_T_e_tvecs = [] # list of translations for b_T_e + + # Iterate all collections and create the lists. + # NOTE: cannot test with a single collection. We need at least three. Check + # https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga41b1a8dd70eae371eba707d101729c36 + for collection_key, collection in dataset['collections'].items(): + + # --------------------------------------- + # Get c_T_p using pattern detection + # --------------------------------------- + + # we will us the solve pnp function from opencv. + # This requires a np array of the pattern corners 3d coordinates + # and another np array of the correponding 2d pixels. + + pts_3d = objp # already done in a function above. + + # 2d points in image plane. 
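+        # NOTE (illustrative, added comment): pts_2d below ends up as a (number_of_corners, 2)
+        # float32 array of pixel coordinates, row-aligned with pts_3d, which is the point
+        # pairing cv2.solvePnP expects.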
+ pts_2d = np.ones((number_of_corners, 2), np.float32) + # NOTE: Check labels id (this only works because detections are complete) + for idx, point in enumerate(collection['labels'][pattern][camera]['idxs']): + pts_2d[idx, 0] = point['x'] + pts_2d[idx, 1] = point['y'] + + retval, rvec, tvec = cv2.solvePnP(pts_3d, pts_2d, K, D) + + if not retval: + raise ValueError('solvePnP failed.') + + c_T_p_rvecs.append(rvec) + c_T_p_tvecs.append(tvec) + + # --------------------------------------- + # Get b_T_e using direct kinematics + # --------------------------------------- + + # We will use the atom builtin functions to get the transform directly + b_T_e = getTransform(from_frame='base_link', + to_frame=hand_link, + transforms=collection['transforms']) + + # NOTE invert + # b_T_e = np.linalg.inv(b_T_e) + + # Split 4x4 transformation into rvec and tvec + rvec = matrixToRodrigues(b_T_e) + tvec = np.asarray(b_T_e[0:3, 3]) + + b_T_e_rvecs.append(rvec) + b_T_e_tvecs.append(tvec) + + # --------------------------------------- + # Run hand eye calibration + # --------------------------------------- + + # NOTE Invert b_T_e + # for idx, (tvec, rvec) in enumerate(zip(b_T_e_tvecs, b_T_e_rvecs)): + # tvec, rvec = invertTranslationRodrigues(tvec, rvec) + # b_T_e_tvecs[idx] = tvec + # b_T_e_rvecs[idx] = rvec + + # # NOTE Invert c_T_p + # for idx, (tvec, rvec) in enumerate(zip(c_T_p_tvecs, c_T_p_rvecs)): + # tvec, rvec = invertTranslationRodrigues(tvec, rvec) + # c_T_p_tvecs[idx] = tvec + # c_T_p_rvecs[idx] = rvec + + # result = cv2.calibrateRobotWorldHandEye(c_T_p_rvecs, + # c_T_p_tvecs, + # b_T_e_rvecs, + # b_T_e_tvecs) + # result = cv2.calibrateRobotWorldHandEye(b_T_e_rvecs, + # b_T_e_tvecs, + # c_T_p_rvecs, + # c_T_p_tvecs) + # b_T_p_rvec, b_T_p_tvec, e_T_c_rvec, e_T_c_tvec = result + + result = cv2.calibrateHandEye(b_T_e_rvecs, + b_T_e_tvecs, + c_T_p_rvecs, + c_T_p_tvecs, method=cv2.CALIB_HAND_EYE_PARK) + e_T_c_rvec, e_T_c_tvec = result + + # Convert from rvec tvec to 4x4 transformations + # b_T_p = traslationRodriguesToTransform(b_T_p_tvec, b_T_p_rvec) + e_T_c = traslationRodriguesToTransform(e_T_c_tvec, e_T_c_rvec) + + # --------------------------------------- + # Test calibration + # --------------------------------------- + + e_T_c_ground_truth = getTransform( + from_frame=hand_link, + to_frame="rgb_hand_optical_frame", + transforms=dataset['collections'][selected_collection_key]['transforms']) + + print('estimated e_T_c=\n' + str(e_T_c)) + print('ground truth e_T_c=\n' + str(e_T_c_ground_truth)) + + np.linalg.inv(e_T_c) + np.linalg.inv(e_T_c_ground_truth) + + translation_error, rotation_error, _, _, _, _, _, _ = compareTransforms( + e_T_c_ground_truth, e_T_c) + print('rotation error =' + str(rotation_error)) + print('translation_error =' + str(translation_error)) + + exit(0) + + # Compare against the transformation stored in the dataset as ground truth + + # print('Testing...') + # print(b_T_p_R, b_T_p_t, R_gripper2cam, t_gripper2cam) + + # # Compute OpenCV stereo calibration + # b_T_p_R, b_T_p_t, e_T_c_R, e_T_c_t = cvHandEyeCalibrate( + # objp=objp, + # dataset=dataset, + # camera=camera, + # pattern=pattern, + # number_of_corners=number_of_corners + # ) ######################################## # Get calibrated homogenous tfs ######################################## - calib_tf_base2pattern = joinTFMatrix(R_base2pattern, t_base2pattern) + calib_tf_base2pattern = joinTFMatrix(b_T_p_rvec, b_T_p_tvec) print('Calibrated TF from the base to the pattern:') print(calib_tf_base2pattern) - - 
calib_tf_gripper2opticalframe = joinTFMatrix(R_gripper2opticalframe, t_gripper2opticalframe) + + calib_tf_gripper2opticalframe = joinTFMatrix( + e_T_c_rvec, e_T_c_tvec) print('Gripper to OF:') print(calib_tf_gripper2opticalframe) - - dataset_tf_gripper2opticalframe=getTransform( + + dataset_tf_gripper2opticalframe = getTransform( from_frame="flange", to_frame="rgb_hand_optical_frame", - transforms=dataset['collections'][selected_collection_key]['transforms'] - ) + transforms=dataset['collections'][selected_collection_key]['transforms'] + ) print('Gripper to OF (from dataset):') print(dataset_tf_gripper2opticalframe) @@ -457,23 +509,22 @@ def main(): # After this script is done, check if this is still needed calib_tf_world2pattern, calib_tf_gripper2cam = getWantedTransformsFromOpenCVHandEyeCalib( - dataset = dataset, - calib_tf_base2pattern = calib_tf_base2pattern, - calib_tf_gripper2opticalframe = calib_tf_gripper2opticalframe, - base_link_name = "base_link", - optical_frame_name = camera + "_optical_frame", - sensor_link_name = camera + "_link") - + dataset=dataset, + calib_tf_base2pattern=calib_tf_base2pattern, + calib_tf_gripper2opticalframe=calib_tf_gripper2opticalframe, + base_link_name="base_link", + optical_frame_name=camera + "_optical_frame", + sensor_link_name=camera + "_link") - # Now that we have the tfs, we just need to compare these to the GT (if simulated) + # Now that we have the tfs, we just need to compare these to the GT (if simulated) if args['compare_to_ground_truth']: - + # Initialize errors dict e = {} for collection_key, collection in dataset['collections'].items(): # Usually this is where collections with incomplete detections would be filtered, but I don't believe it is needed since these are deleted from the dataset object previously - + e[collection_key] = {} for pattern_key, pattern in dataset['calibration_config']['calibration_patterns'].items(): @@ -481,33 +532,38 @@ def main(): e[collection_key][pattern_key] = {} # This is similar to what happens in single_rgb_evaluation, but we don't iterate through the list of sensors, since the sensor we aim to calibrate is given as an argument - + # Get the pattern corners in the local pattern frame. 
Must use only corners which have ----------------- # correspondence to the detected points stored in collection['labels'][sensor_key]['idxs'] ------------- - pts_in_pattern = getPointsInPatternAsNPArray(collection_key, pattern_key, camera, dataset) + pts_in_pattern = getPointsInPatternAsNPArray( + collection_key, pattern_key, camera, dataset) - # Transform the pts from the pattern's reference frame to the sensor's reference frame ----------------- + # Transform the pts from the pattern's reference frame to the sensor's reference frame ----------------- # Calc tf_camera2pattern calib_tf_opticalframe2pattern = calc_tf_opticalframe2pattern( - collection_tfs = collection['transforms'], - calib_tf_base2pattern = calib_tf_base2pattern, - calib_tf_gripper2opticalframe = calib_tf_gripper2opticalframe, - base_link_name = "base_link", - gripper_link_name = "flange" + collection_tfs=collection['transforms'], + calib_tf_base2pattern=calib_tf_base2pattern, + calib_tf_gripper2opticalframe=calib_tf_gripper2opticalframe, + base_link_name="base_link", + gripper_link_name="flange" ) - pts_in_sensor = np.dot(calib_tf_opticalframe2pattern, pts_in_pattern) + pts_in_sensor = np.dot( + calib_tf_opticalframe2pattern, pts_in_pattern) # print(pts_in_sensor) # Project points to the image of the sensor - + w, h = collection['data'][sensor_key]['width'], collection['data'][sensor_key]['height'] - K = np.ndarray((3, 3), buffer=np.array(sensor['camera_info']['K']), dtype=float) - D = np.ndarray((5, 1), buffer=np.array(sensor['camera_info']['D']), dtype=float) + K = np.ndarray((3, 3), buffer=np.array( + sensor['camera_info']['K']), dtype=float) + D = np.ndarray((5, 1), buffer=np.array( + sensor['camera_info']['D']), dtype=float) - pts_in_image, _, _ = projectToCamera(K, D, w, h, pts_in_sensor[0:3, :]) + pts_in_image, _, _ = projectToCamera( + K, D, w, h, pts_in_sensor[0:3, :]) # print(pts_in_image) @@ -538,7 +594,8 @@ def main(): cmap = cm.gist_rainbow(np.linspace(0, 1, nx * ny)) for x, y in zip(xs, ys): - drawCross2D(image, x, y, 15, color=(255, 0, 0), thickness=1) + drawCross2D(image, x, y, 15, color=( + 255, 0, 0), thickness=1) # drawSquare2D(image, x_t, y_t, 6, color=color, thickness=1) @@ -547,9 +604,6 @@ def main(): cv2.waitKey(0) - - - if __name__ == '__main__': main() @@ -592,4 +646,4 @@ def main(): # # Save results to a json file # filename_results_json = os.path.dirname(json_file) + '/cv_calibration.json' - # saveAtomDataset(filename_results_json, dataset) \ No newline at end of file + # saveAtomDataset(filename_results_json, dataset) diff --git a/atom_evaluation/scripts/other_calibrations/test_cv_hand_eye.py b/atom_evaluation/scripts/other_calibrations/test_cv_hand_eye.py new file mode 100644 index 00000000..5c4fa47e --- /dev/null +++ b/atom_evaluation/scripts/other_calibrations/test_cv_hand_eye.py @@ -0,0 +1,17 @@ +import cv2 +import numpy as np + +eye_coords = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], + [1.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) +hand_coords = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [ + 1.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) +# It is the rotation matrix between the hand and eye +R_target2cam = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [ + 0.0, 0.0, 1.0], [0.0, 0.0, 0.0]]) +# It is the translation between hand and eye + +x_target2cam = np.array([0.0, 0.0, 0.0, 0.0]) +# It is code for 3*4 matrix transformation +X, _ = cv2.calibrateHandEye(hand_coords, eye_coords, + R_target2cam, t_target2cam) +print(X) diff --git a/atom_examples/rihbot/rihbot_calibration/calibration/config.yml 
b/atom_examples/rihbot/rihbot_calibration/calibration/config.yml index 4bb0c680..dc0b2c57 100644 --- a/atom_examples/rihbot/rihbot_calibration/calibration/config.yml +++ b/atom_examples/rihbot/rihbot_calibration/calibration/config.yml @@ -35,7 +35,7 @@ description_file: "package://rihbot_description/urdf/robot.urdf.xacro" # The calibration framework requires a bagfile to extract the necessary data for the calibration. -bag_file: "$ROS_BAGS/rihbot/bag_diogo.bag" +bag_file: "$ROS_BAGS/rihbot/train_test_opencv.bag" # You must define a frame of reference for the optimization process. # It must exist in the transformation chains of all the sensors which are being calibrated. diff --git a/atom_examples/rihbot/rihbot_calibration/calibration/summary.pdf b/atom_examples/rihbot/rihbot_calibration/calibration/summary.pdf index 55b7df95838d61c4b7f608873c2d9a83c0529b91..0b7f301e9d676fd1e0fbfc7f6119e95b5829a4f4 100644 GIT binary patch delta 39 ucmZ3Uv?ysqhCY|6fw7^Hsj-o9)aG*ia7HFmi^&%Z{5cG{R8?L5-M9eu>I(D# delta 44 zcmZ3Kv^;4;hCaWEfu*6LiLsH1wt=C#fx+e?{cuJmGvmpp4g5LGxKveL{oS|#BhL%1 diff --git a/atom_examples/rihbot/rihbot_calibration/launch/collect_data.launch b/atom_examples/rihbot/rihbot_calibration/launch/collect_data.launch index 297a7d2e..2b62f008 100644 --- a/atom_examples/rihbot/rihbot_calibration/launch/collect_data.launch +++ b/atom_examples/rihbot/rihbot_calibration/launch/collect_data.launch @@ -25,7 +25,7 @@ @arg marker_size The size of the interaction marker that is used to trigger a data save. @arg bag_file Absolute path to the playing bag. - default: $(env ROS_BAGS)/rihbot/bag_diogo.bag + default: $(env ROS_BAGS)/rihbot/train_test_opencv.bag @arg bag_start Playback starting time (in seconds). default: 0.0 @arg bag_rate Playback rate. default: 1.0 --> @@ -43,7 +43,7 @@ - + diff --git a/atom_examples/rihbot/rihbot_calibration/launch/playbag.launch b/atom_examples/rihbot/rihbot_calibration/launch/playbag.launch index 44d49f6f..fc93fad1 100644 --- a/atom_examples/rihbot/rihbot_calibration/launch/playbag.launch +++ b/atom_examples/rihbot/rihbot_calibration/launch/playbag.launch @@ -22,7 +22,7 @@ Sets up image decompressors if needed, reads the urdf robot description. @arg bag_file Absolute path to the playing bag. - default: $(env ROS_BAGS)/rihbot/bag_diogo.bag + default: $(env ROS_BAGS)/rihbot/train_test_opencv.bag @arg bag_start Playback starting time (in seconds). default: 0.0 @arg bag_rate Playback rate. default: 1.0 @arg use_atom_bag_file Uses the bag file to complement the transformations generated by the urdf/joint_states/robot_state_publisher. Used in cases like softbot. default: False @@ -30,7 +30,7 @@ - + diff --git a/atom_examples/rihbot/rihbot_calibration/launch/set_initial_estimate.launch b/atom_examples/rihbot/rihbot_calibration/launch/set_initial_estimate.launch index dc0636ed..a31da040 100644 --- a/atom_examples/rihbot/rihbot_calibration/launch/set_initial_estimate.launch +++ b/atom_examples/rihbot/rihbot_calibration/launch/set_initial_estimate.launch @@ -22,7 +22,7 @@ Rviz interactive markers are used to set the pose of the sensors. @arg bag_file Absolute path to the playing bag. - default: $(env ROS_BAGS)/rihbot/bag_diogo.bag + default: $(env ROS_BAGS)/rihbot/train_test_opencv.bag @arg bag_start Playback starting time (in seconds). default: 0.0 @arg bag_rate Playback rate. default: 1.0 @arg marker_size Size of the markers. 
default: 0.5 @@ -36,7 +36,7 @@ - + diff --git a/atom_examples/rihbot/rihbot_calibration/rviz/collect_data.rviz b/atom_examples/rihbot/rihbot_calibration/rviz/collect_data.rviz index 517e322c..fadba655 100644 --- a/atom_examples/rihbot/rihbot_calibration/rviz/collect_data.rviz +++ b/atom_examples/rihbot/rihbot_calibration/rviz/collect_data.rviz @@ -1,114 +1,285 @@ Panels: -- Class: rviz/Displays - Help Height: 0 - Name: Displays - Property Tree Widget: + - Class: rviz/Displays + Help Height: 0 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + Splitter Ratio: 0.763005793094635 + Tree Height: 347 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties Expanded: - - /Global Options1 - - /base_camera-Image1 - Splitter Ratio: 0.763005793094635 - Tree Height: 523 -- Class: rviz/Selection - Name: Selection -- Class: rviz/Tool Properties - Expanded: - - /2D Pose Estimate1 - - /2D Nav Goal1 - - /Publish Point1 - Name: Tool Properties - Splitter Ratio: 0.5886790156364441 -- Class: rviz/Views - Expanded: - - /Current View1 - Name: Views - Splitter Ratio: 0.5 -- Class: rviz/Time - Experimental: false - Name: Time - SyncMode: 0 - SyncSource: '' + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: rgb_hand-Camera Preferences: PromptSaveOnExit: true Toolbars: toolButtonStyle: 2 Visualization Manager: - Class: '' + Class: "" Displays: - - Alpha: 0.5 - Cell Size: 1 - Class: rviz/Grid - Color: 160; 160; 164 - Enabled: true - Line Style: - Line Width: 0.02 - Value: Lines - Name: Grid - Normal Cell Count: 0 - Offset: - X: 0 - Y: 0 - Z: 0 - Plane: XY - Plane Cell Count: 10 - Reference Frame: world - Value: true - - Class: rviz/TF - Enabled: true - Frame Timeout: 15 - Frames: - All Enabled: true - Marker Scale: 1 - Name: TF - Show Arrows: false - Show Axes: true - Show Names: false - Tree: {} - Update Interval: 0 - Value: false - - Class: rviz/RobotModel - Collision Enabled: false - Enabled: true - Links: {} - Name: RobotModel - Robot Description: robot_description - TF Prefix: '' - Update Interval: 0 - Value: true - Visual Enabled: true - - Class: rviz/InteractiveMarkers - Enable Transparency: true - Enabled: true - Name: ManualDataLabeler-InteractiveMarkers - Show Axes: true - Show Descriptions: true - Show Visual Aids: false - Update Topic: data_labeler/update - Value: true - - Class: rviz/Image - Enabled: true - Image Topic: /rgb_hand/image_raw/labeled - Max Value: 1 - Median window: 5 - Min Value: 0 - Name: rgb_hand-Labels-Image - Normalize Range: true - Queue Size: 2 - Transport Hint: raw - Unreliable: false - Value: false - - Class: rviz/Camera - Enabled: true - Image Rendering: background and overlay - Image Topic: /rgb_hand/image_raw - Name: rgb_hand-Camera - Overlay Alpha: 0.5 - Queue Size: 2 - Transport Hint: raw - Unreliable: false - Value: false - Visibility: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.019999999552965164 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: world + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 15 + Frames: + All Enabled: true + base: + Value: true + base_link: + Value: true + base_link_inertia: + Value: true + 
flange: + Value: true + forearm_link: + Value: true + leg1_table: + Value: true + leg2_table: + Value: true + leg3_table: + Value: true + leg4_table: + Value: true + rgb_hand_link: + Value: true + rgb_hand_optical_frame: + Value: true + shoulder_link: + Value: true + table: + Value: true + tabletop_table: + Value: true + tool0: + Value: true + upper_arm_link: + Value: true + world: + Value: true + wrist_1_link: + Value: true + wrist_2_link: + Value: true + wrist_3_link: + Value: true + Marker Alpha: 1 + Marker Scale: 1 + Name: TF + Show Arrows: false + Show Axes: true + Show Names: false + Tree: + world: + table: + leg1_table: + {} + leg2_table: + {} + leg3_table: + {} + leg4_table: + {} + tabletop_table: + base_link: + base: + {} + base_link_inertia: + shoulder_link: + upper_arm_link: + forearm_link: + wrist_1_link: + wrist_2_link: + wrist_3_link: + flange: + rgb_hand_link: + rgb_hand_optical_frame: + {} + tool0: + {} + Update Interval: 0 + Value: true + - Alpha: 1 + Class: rviz/RobotModel + Collision Enabled: false + Enabled: true + Links: + All Links Enabled: true + Expand Joint Details: false + Expand Link Details: false + Expand Tree: false + Link Tree Style: Links in Alphabetic Order + base: + Alpha: 1 + Show Axes: false + Show Trail: false + base_link: + Alpha: 1 + Show Axes: false + Show Trail: false + base_link_inertia: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + flange: + Alpha: 1 + Show Axes: false + Show Trail: false + forearm_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + leg1_table: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + leg2_table: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + leg3_table: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + leg4_table: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + rgb_hand_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + rgb_hand_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + shoulder_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + table: + Alpha: 1 + Show Axes: false + Show Trail: false + tabletop_table: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + tool0: + Alpha: 1 + Show Axes: false + Show Trail: false + upper_arm_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + world: + Alpha: 1 + Show Axes: false + Show Trail: false + wrist_1_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wrist_2_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wrist_3_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + Name: RobotModel + Robot Description: robot_description + TF Prefix: "" + Update Interval: 0 + Value: true + Visual Enabled: true + - Class: rviz/InteractiveMarkers + Enable Transparency: true + Enabled: true + Name: ManualDataLabeler-InteractiveMarkers + Show Axes: true + Show Descriptions: true + Show Visual Aids: false + Update Topic: data_labeler/update + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /rgb_hand/image_raw/labeled + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: rgb_hand-Labels-Image + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false Value: true - Zoom Factor: 1 + - Class: rviz/Camera + Enabled: false + Image Rendering: background and overlay + Image Topic: /rgb_hand/image_raw + Name: rgb_hand-Camera + Overlay Alpha: 0.5 + Queue Size: 2 + Transport Hint: raw + Unreliable: false + 
Value: false + Visibility: + Grid: true + ManualDataLabeler-InteractiveMarkers: true + RobotModel: true + TF: true + Value: true + rgb_hand-Labels-Image: true + Zoom Factor: 1 Enabled: true Global Options: Background Color: 255; 255; 255 @@ -117,23 +288,23 @@ Visualization Manager: Frame Rate: 30 Name: root Tools: - - Class: rviz/Interact - Hide Inactive Objects: true - - Class: rviz/MoveCamera - - Class: rviz/Select - - Class: rviz/FocusCamera - - Class: rviz/Measure - - Class: rviz/SetInitialPose - Theta std deviation: 0.2617993950843811 - Topic: /initialpose - X std deviation: 0.5 - Y std deviation: 0.5 - - Class: rviz/SetGoal - Topic: /move_base_simple/goal - - Class: rviz/PublishPoint - Single click: true - Topic: /clicked_point - - Class: rviz_visual_tools/KeyTool + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Theta std deviation: 0.2617993950843811 + Topic: /initialpose + X std deviation: 0.5 + Y std deviation: 0.5 + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + - Class: rviz_visual_tools/KeyTool Value: true Views: Current: @@ -144,6 +315,7 @@ Visualization Manager: Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false + Field of View: 0.7853981852531433 Focal Point: X: 0.3314114809036255 Y: 0.6464090347290039 @@ -155,36 +327,35 @@ Visualization Manager: Near Clip Distance: 0.009999999776482582 Pitch: 0.46479693055152893 Target Frame: - Value: XYOrbit (rviz) Yaw: 2.752181053161621 Saved: - - Class: rviz/Orbit - Distance: 4.057810306549072 - Enable Stereo Rendering: - Stereo Eye Separation: 0.05999999865889549 - Stereo Focal Distance: 1 - Swap Stereo Eyes: false - Value: false - Focal Point: - X: -0.424695760011673 - Y: 0.802316427230835 - Z: 0.9974352121353149 - Focal Shape Fixed Size: true - Focal Shape Size: 0.05000000074505806 - Invert Z Axis: false - Name: Orbit - Near Clip Distance: 0.009999999776482582 - Pitch: 0.5447967052459717 - Target Frame: - Value: Orbit (rviz) - Yaw: 3.929020881652832 + - Class: rviz/Orbit + Distance: 4.057810306549072 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Field of View: 0.7853981852531433 + Focal Point: + X: -0.424695760011673 + Y: 0.802316427230835 + Z: 0.9974352121353149 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Orbit + Near Clip Distance: 0.009999999776482582 + Pitch: 0.5447967052459717 + Target Frame: + Yaw: 3.929020881652832 Window Geometry: Displays: collapsed: false Height: 683 Hide Left Dock: false Hide Right Dock: false - QMainWindow State: 
000000ff00000000fd00000004000000000000016a0000024cfc020000000bfb0000001200530065006c0065006300740069006f006e000000003d0000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007301000000410000024c000000dd00fffffffb0000000a00560069006500770073000000018b00000102000000b000fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000001a00460072006f006e00740061006c00430061006d006500720061000000012e000001170000000000000000fb000000300062006100730065005f00630061006d006500720061002d004c006100620065006c0073002d0049006d00610067006500000001da000000b30000000000000000000000010000011c0000024cfc0200000015fb000000220062006100730065005f00630061006d006500720061002d0049006d00610067006500000000410000024c0000001600fffffffb0000001c00460072006f006e00740061006c002d00430061006d006500720061000000003d000001260000000000000000fb0000001e0054006f007000520069006700680074002d00430061006d00650072006100000000410000003a0000000000000000fb0000001c0054006f0070004c006500660074002d00430061006d0065007200610000000041000003840000000000000000fb0000000c00430061006d00650072006100000000410000024c0000000000000000fb0000000c00430061006d006500720061010000003d000003420000000000000000fb0000002a0054006f0070005200690067006800740049006d006100670065002d004c006100620065006c0065006400000000410000024c0000000000000000fb0000000c00430061006d00650072006101000000410000024c0000000000000000fb000000280054006f0070004c0065006600740049006d006100670065002d004c006100620065006c006500640000000177000001160000000000000000fb000000340054006f00700043005200470042004400430061006d002d0049006d006100670065002d004c006100620065006c006c006500640000000120000002a50000000000000000fb0000003a0054006f00700043005200470042004400430061006d006500720061002d0049006d006100670065002d004c006100620065006c006c0065006401000001630000012a0000000000000000fb000000280054006f00700043005200470042004400430061006d006500720061002d0049006d00610067006500000001da000000b30000000000000000fb0000001a0054006f0070004c006500660074002d0049006d00610067006501000000410000024c0000000000000000fb0000001c0054006f007000520069006700680074002d0049006d0061006700650100000169000001240000000000000000fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001e004c00650066007400530074006500720065006f0049006d006100670065000000003d000001340000000000000000fb000000200052006900670068007400530074006500720065006f0049006d0061006700650000000122000000dd0000000000000000fb0000001600460072006f006e007400430061006d006500720061000000003d0000008b0000000000000000fb0000002a0052006900670068007400530074006500720065006f005700690064006500430061006d00650072006100000000ce0000008a0000000000000000fb00000026004c00650066007400530074006500720065006f00570069006400650049006d006100670065000000003d0000019d0000000000000000fb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d00650000000000000007800000030000fffffffb0000000800540069006
d00650100000000000004500000000000000000000003e60000024c00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+  QMainWindow State: 000000ff00000000fd00000004000000000000016a00000253fc020000000dfb0000001200530065006c0065006300740069006f006e000000003d0000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003c00000197000000c800fffffffb0000000a00560069006500770073000000018b00000102000000a200fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000001a00460072006f006e00740061006c00430061006d006500720061000000012e000001170000000000000000fb000000300062006100730065005f00630061006d006500720061002d004c006100620065006c0073002d0049006d00610067006500000001da000000b30000000000000000fb0000002a007200670062005f00680061006e0064002d004c006100620065006c0073002d0049006d00610067006501000001d9000000b60000001600fffffffb0000001e007200670062005f00680061006e0064002d00430061006d0065007200610000000206000000890000001600ffffff000000010000011c0000024cfc0200000015fb000000220062006100730065005f00630061006d006500720061002d0049006d00610067006500000000410000024c0000000000000000fb0000001c00460072006f006e00740061006c002d00430061006d006500720061000000003d000001260000000000000000fb0000001e0054006f007000520069006700680074002d00430061006d00650072006100000000410000003a0000000000000000fb0000001c0054006f0070004c006500660074002d00430061006d0065007200610000000041000003840000000000000000fb0000000c00430061006d00650072006100000000410000024c0000000000000000fb0000000c00430061006d006500720061010000003d000003420000000000000000fb0000002a0054006f0070005200690067006800740049006d006100670065002d004c006100620065006c0065006400000000410000024c0000000000000000fb0000000c00430061006d00650072006101000000410000024c0000000000000000fb000000280054006f0070004c0065006600740049006d006100670065002d004c006100620065006c006500640000000177000001160000000000000000fb000000340054006f00700043005200470042004400430061006d002d0049006d006100670065002d004c006100620065006c006c006500640000000120000002a50000000000000000fb0000003a0054006f00700043005200470042004400430061006d006500720061002d0049006d006100670065002d004c006100620065006c006c0065006401000001630000012a0000000000000000fb000000280054006f00700043005200470042004400430061006d006500720061002d0049006d00610067006500000001da000000b30000000000000000fb0000001a0054006f0070004c006500660074002d0049006d00610067006501000000410000024c0000000000000000fb0000001c0054006f007000520069006700680074002d0049006d0061006700650100000169000001240000000000000000fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001e004c00650066007400530074006500720065006f0049006d006100670065000000003d000001340000000000000000fb000000200052006900670068007400530074006500720065006f0049006d0061006700650000000122000000dd0000000000000000fb0000001600460072006f006e007400430061006d006500720061000000003d0000008b0000000000000000fb0000002a0052006900670068007400530074006500720065006f005700690064006500430061006d00650072006100000000ce0000008a0000000000000000fb00000026004c00650066007400530074006500720065006f00570069006400650049006d006100670065000000003d0000019d0000000000000000fb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d00650000000000000007800000047700fffffffb0000000800540069006d00650100000000000004500000000000000000000003e60000025300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
   Selection:
     collapsed: false
   Time:
@@ -195,6 +366,8 @@ Window Geometry:
     collapsed: false
   Width: 1366
   X: 0
-  Y: 28
-  base_camera-Image:
+  Y: 30
+  rgb_hand-Camera:
+    collapsed: false
+  rgb_hand-Labels-Image:
     collapsed: false