From 690e602b41fac97ddce6ee38618c3cbf5a21002e Mon Sep 17 00:00:00 2001
From: Miguel Oliveira
Date: Mon, 20 May 2024 11:24:48 +0000
Subject: [PATCH 1/3] #954 icp_based is working

---
 .../dahll2017/lidar_to_rgb_dahll_2017.py      | 238 ++++++++++++++----
 1 file changed, 185 insertions(+), 53 deletions(-)

diff --git a/atom_evaluation/scripts/other_calibrations/dahll2017/lidar_to_rgb_dahll_2017.py b/atom_evaluation/scripts/other_calibrations/dahll2017/lidar_to_rgb_dahll_2017.py
index 955adb8b..07259784 100644
--- a/atom_evaluation/scripts/other_calibrations/dahll2017/lidar_to_rgb_dahll_2017.py
+++ b/atom_evaluation/scripts/other_calibrations/dahll2017/lidar_to_rgb_dahll_2017.py
@@ -56,6 +56,123 @@
 # -------------------------------------------------------------------------------
 
 
+def ku(a, b):
+    true_points = a
+    mapping_points = b
+
+    mapped_centroid = np.average(mapping_points, axis=0)
+    true_centroid = np.average(true_points, axis=0)
+
+    # Kabsch requires the cross-covariance of the centered point sets.
+    h = (mapping_points - mapped_centroid).T @ (true_points - true_centroid)
+    u, s, vt = np.linalg.svd(h)
+    v = vt.T
+
+    # guard against reflections in the solution
+    d = np.linalg.det(v @ u.T)
+    e = np.array([[1, 0, 0], [0, 1, 0], [0, 0, d]])
+
+    r = v @ e @ u.T
+
+    tt = true_centroid - np.matmul(r, mapped_centroid)
+
+    transform = np.eye(4)
+    transform[0:3, 0:3] = r
+    transform[0:3, 3] = tt
+
+    return transform
+
+
+def kabsch_umeyama(A, B):
+    assert A.shape == B.shape
+    n, m = A.shape
+
+    EA = np.mean(A, axis=0)
+    EB = np.mean(B, axis=0)
+    VarA = np.mean(np.linalg.norm(A - EA, axis=1) ** 2)
+
+    H = ((A - EA).T @ (B - EB)) / n
+    U, D, VT = np.linalg.svd(H)
+    d = np.sign(np.linalg.det(U) * np.linalg.det(VT))
+    S = np.diag([1] * (m - 1) + [d])
+
+    R = U @ S @ VT
+    c = VarA / np.trace(np.diag(D) @ S)
+    t = EA - c * R @ EB
+
+    print(R)
+    print(t)
+
+    transform = np.eye(4)
+    transform[0:3, 0:3] = R
+    transform[0:3, 3] = t
+
+    return transform, c
+
+# Implements the Kabsch algorithm - best fit.
+# Supports scaling (Umeyama).
+# Compares well to SA results for the same data.
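+#
+# A minimal sanity check (editor's sketch, not part of the original script;
+# assumes numpy as np and scipy's Rotation, which this script already uses
+# further below): applying a known rotation and translation to a set of
+# points and feeding both sets to kabsch_umeyama should recover them.
+#
+#   Rz = Rotation.from_euler('z', 30, degrees=True).as_matrix()
+#   B = np.random.rand(4, 3)                    # four 3D corner points
+#   A = B @ Rz.T + np.array([0.5, -0.2, 1.0])   # rotate, then translate
+#   T, c = kabsch_umeyama(A, B)                 # T maps B onto A
+#   assert np.allclose(T[0:3, 0:3], Rz) and np.isclose(c, 1.0)
+#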
+# Input:
+#     Nominal  A Nx3 matrix of points
+#     Measured B Nx3 matrix of points
+# Returns s,R,t
+# s = scale B to A
+# R = 3x3 rotation matrix (B to A)
+# t = 3x1 translation vector (B to A)
+
+
+def rigid_transform_3D(A, B, scale=False):
+    # From https://gist.github.com/oshea00/dfb7d657feca009bf4d095d4cb8ea4be
+
+    assert len(A) == len(B)
+
+    N = A.shape[0]  # total points
+
+    centroid_A = np.mean(A, axis=0)
+    centroid_B = np.mean(B, axis=0)
+
+    # center the points
+    AA = A - np.tile(centroid_A, (N, 1))
+    BB = B - np.tile(centroid_B, (N, 1))
+
+    # NOTE the original gist uses np.matrix, for which * is a matrix product;
+    # with np.ndarray we must use @ instead.
+    if scale:
+        H = np.transpose(BB) @ AA / N
+    else:
+        # H = np.transpose(BB) @ AA
+        # NOTE comments on the gist suggest the line above should instead be:
+        H = np.transpose(AA) @ BB
+
+    U, S, Vt = np.linalg.svd(H)
+
+    R = Vt.T @ U.T
+
+    # special reflection case
+    if np.linalg.det(R) < 0:
+        print("Reflection detected")
+        Vt[2, :] *= -1
+        R = Vt.T @ U.T
+
+    if scale:
+        varA = np.var(A, axis=0).sum()
+        c = 1 / (1 / varA * np.sum(S))  # scale factor
+        t = -R @ (centroid_B.T * c) + centroid_A.T
+    else:
+        c = 1
+        t = -R @ centroid_B.T + centroid_A.T
+
+    transform = np.eye(4)
+    transform[0:3, 0:3] = R
+
+    print(R)
+    print(t)
+    transform[0:3, 3] = t
+    return c, transform
+
+
 def get4x4TransformFromXYZWPR(xyzwpr):
     T = np.zeros((4, 4), float)
@@ -316,29 +433,40 @@ def main():
     # ------------------------------------------
     # Run calibration
     # ------------------------------------------
-    # output should be estimated camera_T_lidar tranform
-    if args['method_name'] == 'icp_based':
-        # Estimate one transformation per collection
-        camera_T_lidar_lst = []
-        for idx, (collection_key, collection) in enumerate(dataset["collections"].items()):
+    # Estimate one transformation per collection
+    camera_T_lidar_lst = []
+    for idx, (collection_key, collection) in enumerate(dataset["collections"].items()):
+
+        # Camera corners to required format
+        corners_in_camera = corners_in_camera_lst[idx]
+        # print('corners_in_camera=\n' + str(corners_in_camera))
+
+        # Lidar corners to required format
+        corners_in_lidar = np.zeros((4, 4))
+        for corner_idx, (intersection_key, intersection) in enumerate(
+                annotations[collection_key]['intersections'].items()):
+
+            corners_in_lidar[0, corner_idx] = intersection['x']
+            corners_in_lidar[1, corner_idx] = intersection['y']
+            corners_in_lidar[2, corner_idx] = intersection['z']
+            corners_in_lidar[3, corner_idx] = 1.0
+        # print('corners_in_lidar=\n' + str(corners_in_lidar))
+
+        # compute averages
+        average_corners_in_camera = np.reshape(np.mean(corners_in_camera, axis=1), (4, 1))
+        average_corners_in_lidar = np.reshape(np.mean(corners_in_lidar, axis=1), (4, 1))
+
+        # output should be the estimated camera_T_lidar transform
+        if args['method_name'] == 'icp_based':
 
             # prepare two open3d point clouds and then use open3d's icp
-            corners_in_camera = corners_in_camera_lst[idx]
             camera_corners = o3d.geometry.PointCloud()
             camera_corners.points = o3d.utility.Vector3dVector(
                 corners_in_camera[0: 3, :].transpose())
 
-            corners_in_camera = np.zeros((3, 4))
-            for idx, (intersection_key, intersection) in enumerate(
-                    annotations[collection_key]['intersections'].items()):
-
-                corners_in_camera[0, idx] = intersection['x']
-                corners_in_camera[1, idx] = intersection['y']
-                corners_in_camera[2, idx] = intersection['z']
-
             lidar_corners = o3d.geometry.PointCloud()
-            lidar_corners.points = o3d.utility.Vector3dVector(corners_in_camera.transpose())
+            lidar_corners.points = o3d.utility.Vector3dVector(
+                corners_in_lidar[0:3, :].transpose())
 
             criteria = 
o3d.pipelines.registration.ICPConvergenceCriteria( relative_fitness=1e-06, relative_rmse=1e-06, max_iteration=300) @@ -355,34 +483,11 @@ def main(): 'Collection ' + collection_key + ' estimated transform=\n' + str(reg_p2p.transformation)) - # Estimate final camera_T_lidar transformation using average as proposed in Dahll2017 - print('gt transform=\n' + str(camera_T_lidar_gt)) - camera_T_lidar = averageTransforms(camera_T_lidar_lst) - - elif args['method_name'] == 'closed_form': - - # Estimate one transformation per collection - camera_T_lidar_lst = [] - for idx, (collection_key, collection) in enumerate(dataset["collections"].items()): - - # Camera corners to required format - corners_in_camera = corners_in_camera_lst[idx] - # print('corners_in_lidar=\n' + str(corners_in_lidar)) - - # Lidar corners to required format - corners_in_lidar = np.zeros((4, 4)) - for idx, (intersection_key, intersection) in enumerate( - annotations[collection_key]['intersections'].items()): + # Estimate final camera_T_lidar transformation using average as proposed in Dahll2017 + print('gt transform=\n' + str(camera_T_lidar_gt)) + camera_T_lidar = averageTransforms(camera_T_lidar_lst) - corners_in_lidar[0, idx] = intersection['x'] - corners_in_lidar[1, idx] = intersection['y'] - corners_in_lidar[2, idx] = intersection['z'] - corners_in_lidar[3, idx] = 1.0 - # print('corners_in_camera=\n' + str(corners_in_camera)) - - # compute averages - average_corners_in_camera = np.reshape(np.mean(corners_in_camera, axis=1), (4, 1)) - average_corners_in_lidar = np.reshape(np.mean(corners_in_lidar, axis=1), (4, 1)) + elif args['method_name'] == 'closed_form': # align to zero zero_aligned_corners_in_camera = corners_in_camera - \ @@ -395,14 +500,22 @@ def main(): # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.align_vectors.html # we need N,3 lists for the input vectors a and b a = np.array((4, 3)) - a = zero_aligned_corners_in_camera[0:3, :].T + a = corners_in_camera[0:3, :].T + # a = zero_aligned_corners_in_camera[0:3, :].T b = np.array((4, 3)) - b = zero_aligned_corners_in_lidar[0:3, :].T + b = corners_in_lidar[0:3, :].T + # b = zero_aligned_corners_in_lidar[0:3, :].T rot, rssd, sens = Rotation.align_vectors(a, b, return_sensitivity=True) rotation = rot.as_matrix() + # transform2, c = kabsch_umeyama(a, b) + transform2 = ku(a, b) + + # c, transform2 = rigid_transform_3D(a, b, scale=False) + print('transform2 =\n' + str(transform2)) + # Compute the translation using eq 8 from the paper # https://arxiv.org/pdf/1705.09785 @@ -415,13 +528,13 @@ def main(): transform[0:3, 0:3] = rotation transform[0:3, 3] = translation[0:3, 0] - camera_T_lidar_lst.append(transform) + camera_T_lidar_lst.append(transform2) print('Collection ' + collection_key + ' estimated transform=\n' + str(transform)) - # Estimate final camera_T_lidar transformation using average as proposed in Dahll2017 - print('gt transform=\n' + str(camera_T_lidar_gt)) - camera_T_lidar = averageTransforms(camera_T_lidar_lst) + # Estimate final camera_T_lidar transformation using average as proposed in Dahll2017 + print('gt transform=\n' + str(camera_T_lidar_gt)) + camera_T_lidar = averageTransforms(camera_T_lidar_lst) # ------------------------------------------ # Define visualization @@ -432,11 +545,11 @@ def main(): camera_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1) entities.append(camera_frame) - # Add pattern frames - for idx, (collection_key, collection) in enumerate(dataset["collections"].items()): - 
pattern_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2) - pattern_frame = pattern_frame.transform(camera_T_pattern_lst[idx]) - entities.append(pattern_frame) + # # Add pattern frames + # for idx, (collection_key, collection) in enumerate(dataset["collections"].items()): + # pattern_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2) + # pattern_frame = pattern_frame.transform(camera_T_pattern_lst[idx]) + # entities.append(pattern_frame) # Draw lidar points (using estimated transform) for idx, (collection_key, collection) in enumerate(dataset["collections"].items()): @@ -507,6 +620,25 @@ def main(): color = colormap[idx, 0:3]*0.7 # use lidar cylinders less darker to distinguish draw_cylinder(entities, x, y, z, radius=0.01, height=0.15, color=color) + # Draw lidar coordinate frames per collection (should all be close to the same pose) + for idx, (collection_key, collection) in enumerate(dataset["collections"].items()): + collection_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1) + collection_frame = collection_frame.transform(camera_T_lidar_lst[idx]) + color = colormap[idx, 0:3] + collection_frame.paint_uniform_color(color) + entities.append(collection_frame) + + # Draw estimated joint lidar coordinate frame + estimated_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2) + estimated_frame = estimated_frame.transform(camera_T_lidar) + entities.append(estimated_frame) + + # Draw ground truth joint lidar coordinate frame + ground_truth_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2) + ground_truth_frame = ground_truth_frame.transform(camera_T_lidar_gt) + ground_truth_frame.paint_uniform_color((0.1, 0.1, 0.1)) + entities.append(ground_truth_frame) + o3d.visualization.draw_geometries(entities, zoom=view['trajectory'][0]['zoom'], front=view['trajectory'][0]['front'], From bd37a3ef6b413751029fbef53f0cab11d09ffaf4 Mon Sep 17 00:00:00 2001 From: Miguel Oliveira Date: Thu, 11 Jul 2024 16:46:46 +0100 Subject: [PATCH 2/3] #967 working on it. 
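
Adds a first version of the odometry_inspector tool: an interactive script to
step through the collections of a dataset, manually label depth and 3D lidar
data, and inspect the result in RViz. A possible invocation (the dataset path
and argument values are illustrative; -rc and -rs are required by the script's
argparse):

    rosrun atom_calibration odometry_inspector \
        -json $ATOM_DATASETS/my_dataset/dataset.json \
        -rc <reference_collection> -rs <reference_sensor>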
--- atom_calibration/scripts/odometry_inspector | 276 ++++++ .../odometry_inspector/README.md | 26 + .../odometry_inspector/__init__.py | 0 .../depth_manual_labeling.py | 223 +++++ .../lidar3d_manual_labeling.py | 137 +++ .../odometry_inspector/visualization.py | 903 ++++++++++++++++++ 6 files changed, 1565 insertions(+) create mode 100644 atom_calibration/scripts/odometry_inspector create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/README.md create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/__init__.py create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/depth_manual_labeling.py create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/lidar3d_manual_labeling.py create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/visualization.py diff --git a/atom_calibration/scripts/odometry_inspector b/atom_calibration/scripts/odometry_inspector new file mode 100644 index 00000000..ab2cf31b --- /dev/null +++ b/atom_calibration/scripts/odometry_inspector @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 + +# System and standard imports +import argparse +import os +import os.path +import sys +from functools import partial + +# ros imports +from atom_core.utilities import createLambdaExpressionsForArgs +import rospy +from atom_core.naming import generateLabeledTopic +from geometry_msgs.msg import PointStamped +from sensor_msgs.msg import PointCloud2 + +# 3rd-party imports +from colorama import Back, Fore, Style +from pynput import keyboard + +# Atom imports +from atom_calibration.odometry_inspector.depth_manual_labeling import clickedPointsCallback, clickedPointsReset +from atom_calibration.odometry_inspector.lidar3d_manual_labeling import * +from atom_calibration.odometry_inspector.visualization import setupVisualization, visualizationFunction +from atom_core.dataset_io import (filterCollectionsFromDataset, + filterSensorsFromDataset, loadResultsJSON, saveAtomDataset) + +# own packages + + +# global variables ... are forbidden + +# ------------------------------------------------------------------------------- +# --- FUNCTIONS +# ------------------------------------------------------------------------------- + + +def printHelp(): + + s = 'Drag with the left button to select objects in the 3D scene.\n' \ + ' Hold the Alt key to change viewpoint as in the Move tool.'\ + ' Holding the Shift key will allow adding to the current selection.\n'\ + ' Holding the Ctrl key will allow subtraction from the current selection.\n'\ + ' The following keys are also available:\n'\ + ' p - Publish selected points to /rviz_selected_points\n'\ + ' b - Publish selected points to /rviz_selected_border_points\n'\ + ' r - Publish selected points to /rviz_selected_remove_points\n'\ + ' c - Publish selected points to /rviz_selected_clear_all_points\n'\ + ' d - Switch between detect (find pattern inside user defined polygon) and delete (delete boundary points inside polygon) modes for depth labeling\n'\ + ' s - Save dataset\n'\ + ' q - Save dataset and quit \n' + + print(s) + + +def keyPressedCallback(key, selection, dataset, args, depth_mode, output_file): + + if key is None: + return + # TODO #402 does not work if some of the collections in the middles, e.g. '1' are removed. 
An easy test is to run a -csf "lambda x: x in ['0', '2']"
+
+    # print("collection_key = " + str(selection['collection_key']))
+    # print("previous_collection_key = " + str(selection['previous_collection_key']))
+
+    # Shortcut variables
+    idx_collection = list(dataset['collections'].keys()).index(selection['collection_key'])
+    idx_max_collection = len(dataset['collections'].keys()) - 1
+
+    # Convert the key to a character, to keep the handling uniform.
+
+    try:
+        key_char = key.char
+        # print(key_char)
+        if key_char == 's':  # Saves dataset.
+            saveAtomDataset(output_file, dataset, freeze_dataset=True)
+            print('A new dataset was saved in ' + output_file)
+        elif key_char == 'q':  # Saves dataset and quits.
+            saveAtomDataset(output_file, dataset, freeze_dataset=True)
+            print('A new dataset was saved in ' + output_file)
+            print('Exiting ...')
+            selection['exit'] = True
+        elif key_char == 'h':  # Prints help.
+            printHelp()
+        elif key_char == 'd':  # Toggles the depth labeling mode.
+
+            if depth_mode['mode'] == 'detect':
+                depth_mode['mode'] = 'delete'
+            else:
+                depth_mode['mode'] = 'detect'
+
+            print('Changed depth mode to ' + depth_mode['mode'])
+
+    except AttributeError:
+        if key == keyboard.Key.right:  # Save and move to collection + 1.
+            if idx_collection < idx_max_collection:
+                selection['previous_collection_key'] = selection['collection_key']
+                selection['collection_key'] = list(dataset['collections'].keys())[idx_collection+1]
+                print('Changed selected_collection_key to ' + str(selection['collection_key']))
+                saveAtomDataset(output_file, dataset, freeze_dataset=True)
+
+                # More intuitive if detect mode is True at the beginning of a new collection
+                depth_mode['mode'] = 'detect'
+                print('Changed depth mode to ' + depth_mode['mode'])
+            else:
+                print(Fore.RED + 'This is the last collection!!' + Fore.RESET)
+        elif key == keyboard.Key.left:  # Save and move to collection - 1.
+            if idx_collection > 0:
+                selection['previous_collection_key'] = selection['collection_key']
+                selection['collection_key'] = list(dataset['collections'].keys())[idx_collection-1]
+                print('Changed selected_collection_key to ' + str(selection['collection_key']))
+                saveAtomDataset(output_file, dataset, freeze_dataset=True)
+
+                # More intuitive if detect mode is True at the beginning of a new collection
+                depth_mode['mode'] = 'detect'
+                print('Changed depth mode to ' + depth_mode['mode'])
+
+            else:
+                print(Fore.RED + 'This is the first collection!!' + Fore.RESET)
+
+
+# -------------------------------------------------------------------------------
+# --- MAIN
+# -------------------------------------------------------------------------------
+
+
+def main():
+    ap = argparse.ArgumentParser()
+    ap.add_argument("-json", "--json_file",
+                    help="Json file containing input dataset.", type=str, required=True)
+    ap.add_argument(
+        "-ow", "--overwrite",
+        help="Overwrites the data_corrected.json without asking for permission.",
+        action='store_true')
+    ap.add_argument(
+        "-rnb", "--remove_nan_border",
+        help="Option for the labeling of depth images. It will run a detection of nan values in the image, searching for the actual area of the image which is used. 
Then, border detection will use this estimated area...", + action='store_true') + ap.add_argument( + "-rc", "--reference_collection", + help="", required=True) + ap.add_argument( + "-rs", "--reference_sensor", + help="", required=True) + ap.add_argument( + "-csf", "--collection_selection_function", default=None, type=str, + help="A string to be evaluated into a lambda function that receives a collection name as input and " + "returns True or False to indicate if the collection should be loaded (and used in the " + "optimization). The Syntax is lambda name: f(x), where f(x) is the function in python " + "language. Example: lambda name: int(name) > 5 , to load only collections 6, 7, and onward.") + + # Roslaunch adds two arguments (__name and __log) that break our parser. Lets remove those. + arglist = [x for x in sys.argv[1:] if not x.startswith('__')] + # these args have the selection functions as strings + args_original = vars(ap.parse_args(args=arglist)) + args = createLambdaExpressionsForArgs(args_original) # selection functions are now lambdas + + # Arguments which are not options for odometry_inspector + args['ros_visualization'] = True + args['show_images'] = True + args['use_incomplete_collections'] = True + args['remove_partial_detections'] = False + # args['collection_selection_function'] = None + + # --------------------------------------- + # --- INITIALIZATION Read data from file + # --------------------------------------- + # Loads a json file containing the detections. Returned json_file has path resolved by urireader. + dataset, json_file = loadResultsJSON(args['json_file'], args['collection_selection_function']) + + # --------------------------------------- + # --- Filter some collections and / or sensors from the dataset + # --------------------------------------- + # dataset = filterCollectionsFromDataset(dataset, args) # During dataset review there is no need to filter out collections + + output_file = os.path.join(os.path.dirname(args['json_file']), 'dataset_corrected.json') + if os.path.exists(output_file) and args['json_file'] != output_file and not args['overwrite']: + ans = input('The file dataset_corrected.json already exists.' + ' Do you want to overwrite? (Y/n)') + if ans.lower() == 'n': + sys.exit(0) + + dataset = filterSensorsFromDataset(dataset, args) # filter sensors + + print( + 'Loaded dataset containing ' + str(len(dataset['sensors'].keys())) + ' sensors and ' + + str(len(dataset['collections'].keys())) + ' collections.') + + depth_mode = {'mode': 'detect'} + + # --------------------------------------- + # --- Define selection + # --------------------------------------- + # Lets start with the first key on the collections dictionary. + # Data structure used to save the state of navigation throughout the collections in the dataset. + selection = {'collection_key': list(dataset['collections'].keys())[ + 0], 'previous_collection_key': None, 'exit': False} + + print("Configuring visualization ... 
") + graphics = setupVisualization(dataset, args, selection['collection_key']) + + # --------------------------------------- + # --- lidar3d modality subscribers + # --------------------------------------- + + # Define subscriber to receive the selected points + rospy.Subscriber("/rviz/selected_points", PointCloud2, + partial(selectedPointsCallback, selection=selection, dataset=dataset)) + + rospy.Subscriber("/rviz/selected_border_points", PointCloud2, + partial(selectedPointsBorderCallback, selection=selection, dataset=dataset)) + + rospy.Subscriber("/rviz/selected_remove_points", PointCloud2, + partial(selectedPointsRemoveCallback, selection=selection, dataset=dataset)) + + rospy.Subscriber("/rviz/selected_clear_all_points", PointCloud2, + partial(selectedPointsClearAllCallback, selection=selection, dataset=dataset)) + + # --------------------------------------- + # --- Depth modality subscribers + # --------------------------------------- + + # Subscriber for the image_click plugin + # this stores the pixel coordinates of clicked points per collection, and per sensor key + clicked_points = {collection_key: {} for collection_key in dataset['collections']} + + # Initialize clicked points for all collections and sensors + for collection_key in dataset['collections']: + for sensor_key, sensor in dataset['sensors'].items(): + if sensor['modality'] == 'depth': + clickedPointsReset(clicked_points, collection_key, sensor_key) + + # Create a subscriber for each depth sensor + for sensor_key, sensor in dataset['sensors'].items(): + if sensor['modality'] == 'depth': + + points_topic = generateLabeledTopic( + dataset['sensors'][sensor_key]['topic'], + type='2d') + '/mouse_click' + rospy.Subscriber(points_topic, PointStamped, + partial( + clickedPointsCallback, clicked_points=clicked_points, + dataset=dataset, sensor_key=sensor_key, selection=selection, + depth_mode=depth_mode, args=args)) + + # --------------------------------------- + # --- RGB modality subscribers + # --------------------------------------- + # TODO #394 To implement in the future. + + # --------------------------------------- + # --- Define callback to navigate through collections + # --------------------------------------- + listener = keyboard.Listener( + on_press=partial( + keyPressedCallback, selection=selection, dataset=dataset, args=args, + depth_mode=depth_mode, output_file=output_file)) + listener.start() + + # --------------------------------------- + # --- Loop while displaying selected collection + # --------------------------------------- + print('\nPress ' + Fore.BLUE + '"h"' + Style.RESET_ALL + ' for instructions.\n') + + rate = rospy.Rate(10) # in hertz. + tic = rospy.Time.now() + while not rospy.is_shutdown() and not selection['exit']: + models = {'dataset': dataset, 'args': args, 'graphics': graphics} + visualizationFunction(models=models, selection=selection, clicked_points=clicked_points) + rate.sleep() + + # if (rospy.Time.now() - tic).to_sec() > 3: + # tic = rospy.Time.now() + + +if __name__ == "__main__": + main() diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/README.md b/atom_calibration/src/atom_calibration/odometry_inspector/README.md new file mode 100644 index 00000000..e8d5e1e3 --- /dev/null +++ b/atom_calibration/src/atom_calibration/odometry_inspector/README.md @@ -0,0 +1,26 @@ +# Dataset Reviewer + +Tool to manually label 3D point clouds. 
+
+Launch rviz preconfigured:
+
+    roslaunch dataset_playback.launch
+
+Launch the dataset_reviewer script:
+
+    rosrun atom_calibration dataset_playback2 -json $ATOM_DATASETS///dataset.json -uic -si
+
+Then, select the SelectedPointsPublisher plugin.
+
+Once the plugin is selected, click on the points you want and press:
+
+- **p** to add those points to 'idxs'
+- **b** to add those points to 'idxs_limit_points'
+- **r** to remove those points from 'idxs' and/or 'idxs_limit_points'
+- **c** to remove all points from both 'idxs' and 'idxs_limit_points'
+
+To navigate across collections, use the **right** and **left** arrows.
+
+If you want to label multiple point clouds, it is advisable to visualize only the one you are currently labeling.
+
+To save the corrected dataset, select the terminal that launched the dataset_reviewer and press Ctrl+C, followed by yes.
\ No newline at end of file
diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/__init__.py b/atom_calibration/src/atom_calibration/odometry_inspector/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/depth_manual_labeling.py b/atom_calibration/src/atom_calibration/odometry_inspector/depth_manual_labeling.py
new file mode 100644
index 00000000..fa623357
--- /dev/null
+++ b/atom_calibration/src/atom_calibration/odometry_inspector/depth_manual_labeling.py
@@ -0,0 +1,223 @@
+import math
+from copy import copy, deepcopy
+
+import cv2
+import numpy as np
+import scipy.spatial.distance
+from atom_calibration.collect.label_messages import *
+from atom_core.dataset_io import getMsgAndCvImageFromDictionaryDepth
+from atom_core.drawing import drawSquare2D
+
+
+def normalizeDepthImage(image, max_value=5):
+    height, width = image.shape
+
+    # Removing nans before casting to uint8
+    # https://github.com/lardemua/atom/issues/643
+    image_without_nans = np.nan_to_num(image)
+
+    gui_image = np.zeros((height, width, 3), dtype=np.uint8)
+    gui_image[:, :, 0] = image_without_nans / max_value * 255
+    gui_image[:, :, 1] = image_without_nans / max_value * 255
+    gui_image[:, :, 2] = image_without_nans / max_value * 255
+    return gui_image
+
+
+def drawLabelsOnImage(labels, image, color_idxs=(0, 200, 255), color_idxs_limits=(255, 0, 200)):
+    _, width, _ = image.shape
+
+    for idx in labels['idxs']:
+        # convert from linear idx to x_pix and y_pix indices.
+        y = int(idx / width)
+        x = int(idx - y * width)
+        cv2.line(image, (x, y), (x, y), color_idxs, 3)
+
+    for idx in labels['idxs_limit_points']:
+        # convert from linear idx to x_pix and y_pix indices.
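+        # e.g. with width=640, linear idx 1283 maps to y=2, x=3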
+ y = int(idx / width) + x = int(idx - y * width) + cv2.line(image, (x, y), (x, y), color_idxs_limits, 3) + + return image + + +def clickedPointsCallback(point_msg, clicked_points, dataset, sensor_key, selection, + depth_mode, args, tolerance_radius=20): + + collection_key = selection['collection_key'] + + + if clicked_points[collection_key][sensor_key]['valid_polygon']: + clickedPointsReset(clicked_points, collection_key, sensor_key) + + # Add point to list of clicked points + point = {'x': int(point_msg.point.x), 'y': int(point_msg.point.y)} + clicked_points[collection_key][sensor_key]['points'].append(point) + + # Retrieving clicked points for the current sensor + clicked_sensor_points = clicked_points[collection_key][sensor_key]['points'] + + if len(clicked_sensor_points) < 3: # if less than 3 points polygon has no area + clicked_points[collection_key][sensor_key]['valid_polygon'] = False + return + + # Compute the distance between the first and last placed points + start_point = [clicked_sensor_points[0]['x'], clicked_sensor_points[0]['y']] + end_point = [clicked_sensor_points[-1]['x'], clicked_sensor_points[-1]['y']] + start_to_end_distance = scipy.spatial.distance.euclidean(start_point, end_point) + + # polygon closed, compute new labels + if start_to_end_distance < tolerance_radius: + tic = rospy.Time.now() + + if depth_mode['mode'] == 'delete': + print('Deleting depth boundary inside polygon ...') + + height = dataset['sensors'][sensor_key]['camera_info']['height'] + width = dataset['sensors'][sensor_key]['camera_info']['width'] + pattern_mask = getMaskFromPoints(clicked_points[collection_key][sensor_key]['points'], height, width) + + idxs_to_remove = [] + for pattern_key in dataset['calibration_config']['calibration_patterns'].keys(): + for idx, linear_idx in enumerate(dataset['collections'][collection_key]['labels'][pattern_key][sensor_key]['idxs_limit_points']): + y = int(int(linear_idx) / int(width)) + x = linear_idx - width * y + + if pattern_mask[y,x] == 255: # point inside polygon + idxs_to_remove.append(idx) + + idxs_to_remove.reverse() + for idx in idxs_to_remove: + del dataset['collections'][collection_key]['labels'][sensor_key]['idxs_limit_points'][idx] + + clicked_points[collection_key][sensor_key]['valid_polygon'] = True + + print('Completed deleting depth boundary inside polygon') + + + elif depth_mode['mode'] == 'detect': + + print('Labeling pattern from user defined polygon .. 
it may take some time ...') + height = dataset['sensors'][sensor_key]['camera_info']['height'] + width = dataset['sensors'][sensor_key]['camera_info']['width'] + pattern_mask = getMaskFromPoints(clicked_points[collection_key][sensor_key]['points'], height, width) + msg, image = getMsgAndCvImageFromDictionaryDepth(dataset['collections'][collection_key]['data'][sensor_key]) + + # Filter out edges where depth is very far away + # https://github.com/lardemua/atom/issues/612 + + # print('pattern_mask.dtype = ' + str(pattern_mask.dtype)) + # print('type(image) = ' + str(type(image))) + # print('image.dtype = ' + str(image.dtype)) + + # calculate moments of binary image + M = cv2.moments(pattern_mask) + + # calculate x,y coordinate of center + cX = int(M["m10"] / M["m00"]) + cY = int(M["m01"] / M["m00"]) + center = (cX, cY) + # put text and highlight the center + + pattern_mask_bool = pattern_mask.astype(bool) + pattern_mask_filtered = deepcopy(pattern_mask) + + height, width = image.shape + for x in range(0, width): + for y in range(0, height): + if pattern_mask[y,x] == 255: + + # being start and end two points (x1,y1), (x2,y2) + discrete_line = list(zip(*line(*center, *(x,y)))) + + ranges = [image[b,a] for a,b in discrete_line] + idxs_to_remove = [] + remove_all = False + range_prev = ranges[0] + for (x0, y0), (x1, y1) in zip(discrete_line[0:-1], discrete_line[1:]): + value0 = image[y0,x0] + value1 = image[y1,x1] + + if np.isnan(value1): + continue + + if np.isnan(value0): + value0 = range_prev + else: + range_prev = value0 + + diff = abs(value1 - value0) + if diff > 0.1 or remove_all: + # print('removing pixel x=' + str(x1) + ',y=' + str(y1) + ' from mask') + pattern_mask_filtered[y1,x1] = 0 + remove_all = True + + # cv2.imshow('pattern_mask', pattern_mask) + # gui = normalizeDepthImage(image, max_value=5) + # drawSquare2D(gui, x, y, size=7, color=(50, 190, 0), thickness=2) + # + # cv2.circle(gui, (cX, cY), 5, (255, 255, 255), -1) + # # cv2.putText(gui, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255),2) + # + # for x,y in discrete_line: + # cv2.line(gui, (x, y), (x, y), (128,0,0), 1) + # + + # cv2.imshow('gui', gui) + # cv2.waitKey(10) + + # pattern_mask_removals = (np.logical_xor(pattern_mask.astype(bool), pattern_mask_filtered.astype(bool))).astype(np.uint8)*255 + # cv2.imshow('pattern_mas_removals', pattern_mask_removals) + # cv2.waitKey(0) + # print('DONE') + # new_values = image[pattern_mask_filtered.astype(bool)] + # with np.printoptions(threshold=np.inf): + # print(new_values) + # # cv2.imwrite('pattern_mask_filtered.png', pattern_mask_filtered) + # pattern_mask_filtered = cv2.imread('pattern_mask_filtered.png', cv2.IMREAD_GRAYSCALE) + # cv2.namedWindow('pattern_filtered', cv2.WINDOW_NORMAL) + # cv2.imshow('pattern_filtered', pattern_mask_filtered) + + # TODO #646 we should use argument filter_border_edges here as well + labels, gui_image, _ = labelDepthMsg(msg, seed=None, bridge=None, + pyrdown=0, scatter_seed=True, + scatter_seed_radius=8, + debug=False, + subsample_solid_points=7, limit_sample_step=1, + pattern_mask=pattern_mask_filtered, + filter_border_edges=0.025, + remove_nan_border=args['remove_nan_border']) + + + # Update the idxs and idxs_limit labels + # TODO only works for first pattern + first_pattern_key = list(dataset['calibration_config']['calibration_patterns'].keys())[0] + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor_key] = labels + + clicked_points[collection_key][sensor_key]['valid_polygon'] = True + + 
print('Labeling pattern completed in ' + str((rospy.Time.now() - tic).to_sec())) + + +def clickedPointsReset(clicked_points, collection_key, sensor_key): + clicked_points[collection_key][sensor_key] = {'points': [], 'valid_polygon': False} + return clicked_points + + +def getMaskFromPoints(points, image_height, image_width): + pattern_mask_rgb = np.zeros((image_height, image_width, 3), dtype=np.uint8) + + points_list = [] + for point in points[:-1]: + point_tuple = (point['x'], point['y']) + points_list.append(point_tuple) + points_array = (np.array([points_list])) + + # Fill poly needs an np.array of a list of tuples + cv2.fillPoly(pattern_mask_rgb, pts=points_array, + color=(255, 255, 255)) + + pattern_mask, _, _ = cv2.split(pattern_mask_rgb) # single channel mask + + return pattern_mask diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/lidar3d_manual_labeling.py b/atom_calibration/src/atom_calibration/odometry_inspector/lidar3d_manual_labeling.py new file mode 100644 index 00000000..73abdab7 --- /dev/null +++ b/atom_calibration/src/atom_calibration/odometry_inspector/lidar3d_manual_labeling.py @@ -0,0 +1,137 @@ +import sensor_msgs.point_cloud2 as pc2 + + +def selectedPointsCallback(selected_point_cloud, selection, dataset): + """ + Callback function to add selected points to the dataset as idxs. + """ + + collection_key = selection['collection_key'] + print('Adding selected points to idxs labels of collection ' + collection_key + ' ...') + + # Extract xyz coordinates from the selected points + points_selected = pc2.read_points(selected_point_cloud) + gen_selected_points = list(points_selected) + + if len(gen_selected_points) == 0: # To prevent unnecessary operations. + return + + # sensor from the point cloud + # TODO only works for first pattern + first_pattern_key = list(dataset['calibration_config']['calibration_patterns'].keys())[0] + sensor = list(dataset['collections'][collection_key]['labels'][first_pattern_key].keys())[ + int(gen_selected_points[0][4])] + + selected_idxs = [] + for point in gen_selected_points: + selected_idxs.append(int(point[3])) + + idx_center = dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] + + [idx_center.append(x) for x in selected_idxs if x not in idx_center] + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] = idx_center + + if dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] != [] and \ + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['detected'] == False: + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['detected'] = True + + print('Done.') + +def selectedPointsBorderCallback(selected_point_cloud, selection, dataset): + """ + Callback function to add selected points to the dataset as idxs_limit_points. + """ + + collection_key = selection['collection_key'] + print('Adding selected points to idxs_limit_points labels of collection ' + collection_key + ' ...') + + # Extract xyz coordinates from the selected points + points_selected = pc2.read_points(selected_point_cloud) + gen_selected_points = list(points_selected) + + if len(gen_selected_points) == 0: # To prevent unnecessary operations. 
+ return + + # sensor from the point cloud + # TODO only works for first pattern + first_pattern_key = list(dataset['calibration_config']['calibration_patterns'].keys())[0] + sensor = list(dataset['collections'][collection_key]['labels'][first_pattern_key].keys())[ + int(gen_selected_points[0][4])] + + selected_idxs = [] + for point in gen_selected_points: + selected_idxs.append(int(point[3])) + + idx_center = dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] + idx_border = dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs_limit_points'] + + [idx_center.append(x) for x in selected_idxs if x not in idx_center] + [idx_border.append(x) for x in selected_idxs if x not in idx_border] + + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] = idx_center + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs_limit_points'] = idx_border + + + print('Done.') + +def selectedPointsRemoveCallback(selected_point_cloud, selection, dataset): + """ + Callback function to remove selected points from the dataset. + """ + collection_key = selection['collection_key'] + print('Removing selected points from labels of collection ' + collection_key + ' ...') + + # Extract xyz coordinates from the selected points + points_selected = pc2.read_points(selected_point_cloud) + gen_selected_points = list(points_selected) + + if len(gen_selected_points) == 0: # To prevent unnecessary operations. + return + + # sensor from the point cloud + # TODO only works for first pattern + first_pattern_key = list(dataset['calibration_config']['calibration_patterns'].keys())[0] + sensor = list(dataset['collections'][collection_key]['labels'][first_pattern_key].keys())[ + int(gen_selected_points[0][4])] + + selected_idxs = [] + for point in gen_selected_points: + selected_idxs.append(int(point[3])) + + # remove these points from the idx and idxs_limit_points + idx_center = dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] + idx_border = dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs_limit_points'] + + idx_center_new = [x for x in idx_center if x not in selected_idxs] + idx_border_new = [x for x in idx_border if x not in selected_idxs] + + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] = idx_center_new + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs_limit_points'] = idx_border_new + + print('Done.') + +def selectedPointsClearAllCallback(selected_point_cloud, selection, dataset): + """ + Callback function to remove all points of the selected sensor from the dataset. + """ + + collection_key = selection['collection_key'] + print('Clearing all labels of collection ' + collection_key + ' ...') + + # Extract xyz coordinates from the selected points + points_selected = pc2.read_points(selected_point_cloud) + gen_selected_points = list(points_selected) + + if len(gen_selected_points) == 0: # To prevent unnecessary operations. 
+ return + + # sensor from the point cloud + # TODO only works for first pattern + first_pattern_key = list(dataset['calibration_config']['calibration_patterns'].keys())[0] + sensor = list(dataset['collections'][collection_key]['labels'][first_pattern_key].keys())[ + int(gen_selected_points[0][4])] + + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs'] = [] + dataset['collections'][collection_key]['labels'][first_pattern_key][sensor]['idxs_limit_points'] = [] + + print('Done.') diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/visualization.py b/atom_calibration/src/atom_calibration/odometry_inspector/visualization.py new file mode 100644 index 00000000..9c496d2c --- /dev/null +++ b/atom_calibration/src/atom_calibration/odometry_inspector/visualization.py @@ -0,0 +1,903 @@ +""" +Reads a set of data and labels from a group of sensors in a json file and calibrates the poses of these sensors. +""" + +import copy +import math +import struct +from re import I +from tokenize import generate_tokens + +# 3rd-party +import cv2 +import cv_bridge +import numpy as np +import atom_core.ros_numpy + +# import numpy as np # TODO Eurico, line fails if I don't do this +import rospy +import sensor_msgs.point_cloud2 as pc2 +import tf +import tf2_ros +from atom_calibration.calibration.objective_function import * +from atom_calibration.collect.label_messages import * +from atom_calibration.odometry_inspector.depth_manual_labeling import drawLabelsOnImage, normalizeDepthImage +from atom_core.cache import Cache +from atom_core.system import execute +from atom_core.xacro_io import readXacroFile +from atom_core.dataset_io import ( + genCollectionPrefix, getCvImageFromDictionary, getCvImageFromDictionaryDepth, + getPointCloudMessageFromDictionary) +from atom_core.drawing import drawCross2D, drawSquare2D +from atom_core.naming import generateLabeledTopic, generateName +from atom_core.rospy_urdf_to_rviz_converter import urdfToMarkerArray +from atom_core.config_io import uriReader +from colorama import Fore, Style +from cv_bridge import CvBridge +from geometry_msgs.msg import Point, Pose, Quaternion, Transform, TransformStamped, Vector3 +from matplotlib import cm +from rospy_message_converter import message_converter +from sensor_msgs.msg import PointCloud2, PointField, sensor_msgs +from std_msgs.msg import ColorRGBA, Header, UInt8MultiArray +from urdf_parser_py.urdf import URDF + +# stdlib +from visualization_msgs.msg import Marker, MarkerArray + +# own packages + +# ------------------------------------------------------------------------------- +# --- FUNCTIONS +# ------------------------------------------------------------------------------- + + +def createDepthMarkers3D(dataset, sensor_key, collection_key, pattern_key, color=(255, 255, 0, 0.5), + stamp=None, cached=False, frame_id=None, namespace=None): + + if stamp is None: + stamp = rospy.Time.now() + + collection = dataset['collections'][collection_key] + + if frame_id is None: + frame_id = genCollectionPrefix( + collection_key, collection['data'][sensor_key]['header']['frame_id']) + + if namespace is None: + namespace = str(collection_key) + '-' + str(sensor_key) + + markers = MarkerArray() + # Add labelled points to the marker + marker = Marker( + header=Header(frame_id=frame_id, stamp=stamp), + ns=namespace, + id=0, frame_locked=True, type=Marker.SPHERE_LIST, action=Marker.ADD, lifetime=rospy.Duration(0), + pose=Pose(position=Point(x=0, y=0, z=0), orientation=Quaternion(x=0, y=0, z=0, w=1)), + 
scale=Vector3(x=0.01, y=0.01, z=0.01), + color=ColorRGBA(r=color[0], g=color[1], b=color[2], a=color[3])) + + if cached: + points = getPointsInDepthSensorAsNPArray( + collection_key, pattern_key, sensor_key, 'idxs', dataset) + else: + points = getPointsInDepthSensorAsNPArrayNonCached( + collection_key, pattern_key, sensor_key, 'idxs', dataset) + + for idx in range(0, points.shape[1]): + marker.points.append(Point(x=points[0, idx], y=points[1, idx], z=points[2, idx])) + + markers.markers.append(copy.deepcopy(marker)) + + # Add limit points to the marker, this time with larger cubes + marker = Marker( + header=Header(frame_id=frame_id, stamp=stamp), + ns=namespace + '-limit_points', id=0, frame_locked=True, type=Marker.CUBE_LIST, + action=Marker.ADD, lifetime=rospy.Duration(0), + pose=Pose( + position=Point(x=0, y=0, z=0), + orientation=Quaternion(x=0, y=0, z=0, w=1)), + scale=Vector3(x=0.05, y=0.05, z=0.05), + color=ColorRGBA(r=color[0], + g=color[1], + b=color[2], + a=color[3])) + + if cached: + points = getPointsInDepthSensorAsNPArray( + collection_key, pattern_key, sensor_key, 'idxs_limit_points', dataset) + else: + points = getPointsInDepthSensorAsNPArrayNonCached( + collection_key, pattern_key, sensor_key, 'idxs_limit_points', dataset) + + for idx in range(0, points.shape[1]): + marker.points.append(Point(x=points[0, idx], y=points[1, idx], z=points[2, idx])) + + markers.markers.append(copy.deepcopy(marker)) + + return markers + + +def getPointsInSensorAsNPArray_local(_collection_key, _sensor_key, _label_key, _dataset): + # TODO: #395 Daniel, we should you told me about this one but I would like to talk to you again ... although this function is somewhere else, in the other place it uses the dataset as cache... + cloud_msg = getPointCloudMessageFromDictionary( + _dataset['collections'][_collection_key]['data'][_sensor_key]) + idxs = _dataset['collections'][_collection_key]['labels'][_sensor_key][_label_key] + pc = atom_core.ros_numpy.numpify(cloud_msg)[idxs] + points = np.zeros((4, pc.shape[0])) + points[0, :] = pc['x'] + points[1, :] = pc['y'] + points[2, :] = pc['z'] + points[3, :] = 1 + return points + + +def getCvImageFromCollectionSensor(collection_key, sensor_key, dataset): + dictionary = dataset['collections'][collection_key]['data'][sensor_key] + return getCvImageFromDictionary(dictionary) + + +def getCvDepthImageFromCollectionSensor(collection_key, sensor_key, dataset, scale=1000.0): + dictionary = dataset['collections'][collection_key]['data'][sensor_key] + return getCvImageFromDictionaryDepth(dictionary, scale=scale) + + +def createPatternMarkers(frame_id, ns, collection_key, now, dataset, graphics): + markers = MarkerArray() + + # Draw pattern frame lines_sampled (top, left, right, bottom) + marker = Marker(header=Header(frame_id=frame_id, stamp=now), + ns=ns + '-frame_sampled', id=0, frame_locked=True, + type=Marker.CUBE_LIST, action=Marker.ADD, lifetime=rospy.Duration( + 0), + pose=Pose(position=Point(x=0, y=0, z=0), + orientation=Quaternion(x=0, y=0, z=0, w=1)), + scale=Vector3(x=0.01, y=0.01, z=0.01), + color=ColorRGBA(r=graphics['collections'][collection_key]['color'][0], + g=graphics['collections'][collection_key]['color'][1], + b=graphics['collections'][collection_key]['color'][2], a=1.0)) + + pts = [] + pts.extend(dataset['patterns']['frame']['lines_sampled']['left']) + pts.extend(dataset['patterns']['frame']['lines_sampled']['right']) + pts.extend(dataset['patterns']['frame']['lines_sampled']['top']) + 
pts.extend(dataset['patterns']['frame']['lines_sampled']['bottom']) + for pt in pts: + marker.points.append(Point(x=pt['x'], y=pt['y'], z=0)) + + markers.markers.append(marker) + + # Draw corners + marker = Marker(header=Header(frame_id=frame_id, stamp=now), + ns=ns + '-corners', id=0, frame_locked=True, + type=Marker.CUBE_LIST, action=Marker.ADD, lifetime=rospy.Duration( + 0), + pose=Pose(position=Point(x=0, y=0, z=0), + orientation=Quaternion(x=0, y=0, z=0, w=1)), + scale=Vector3(x=0.02, y=0.02, z=0.02), + color=ColorRGBA(r=graphics['collections'][collection_key]['color'][0], + g=graphics['collections'][collection_key]['color'][1], + b=graphics['collections'][collection_key]['color'][2], a=1.0)) + + for idx_corner, pt in enumerate(dataset['patterns']['corners']): + marker.points.append(Point(x=pt['x'], y=pt['y'], z=0)) + marker.colors.append(ColorRGBA(r=graphics['pattern']['colormap'][idx_corner, 0], + g=graphics['pattern']['colormap'][idx_corner, 1], + b=graphics['pattern']['colormap'][idx_corner, 2], a=1)) + + markers.markers.append(marker) + + # Draw transitions + # TODO we don't use this anymore, should we draw it? Perhaps it will be used for 2D Lidar ... + # marker = Marker(header=Header(frame_id=frame_id, stamp=now), + # ns=ns + '-transitions', id=0, frame_locked=True, + # type=Marker.POINTS, action=Marker.ADD, lifetime=rospy.Duration(0), + # pose=Pose(position=Point(x=0, y=0, z=0), orientation=Quaternion(x=0, y=0, z=0, w=1)), + # scale=Vector3(x=0.015, y=0.015, z=0), + # color=ColorRGBA(r=graphics['collections'][collection_key]['color'][0], + # g=graphics['collections'][collection_key]['color'][1], + # b=graphics['collections'][collection_key]['color'][2], a=1.0)) + # + # pts = dataset['patterns']['transitions']['vertical'] + # pts.extend(dataset['patterns']['transitions']['horizontal']) + # for pt in pts: + # marker.points.append(Point(x=pt['x'], y=pt['y'], z=0)) + # + # markers.markers.append(marker) + + # Draw the mesh, if one is provided + if not dataset['calibration_config']['calibration_pattern']['mesh_file'] == "": + # rgba = graphics['collections'][collection_key]['color'] + # color = ColorRGBA(r=rgba[0], g=rgba[1], b=rgba[2], a=1)) + + # print('Got the mesh it is: ' + dataset['calibration_config']['calibration_pattern']['mesh_file']) + m = Marker(header=Header(frame_id=frame_id, stamp=now), + ns=str(collection_key) + '-mesh', id=0, frame_locked=True, + type=Marker.MESH_RESOURCE, action=Marker.ADD, lifetime=rospy.Duration( + 0), + pose=Pose(position=Point(x=0, y=0, z=0), + orientation=Quaternion(x=0, y=0, z=0, w=1)), + scale=Vector3(x=1.0, y=1.0, z=1.0), + color=ColorRGBA(r=1, g=1, b=1, a=1)) + + mesh_file, _, _ = uriReader( + dataset['calibration_config']['calibration_pattern']['mesh_file']) + m.mesh_resource = 'file://' + mesh_file # mesh_resource needs uri format + m.mesh_use_embedded_materials = True + markers.markers.append(m) + + return markers # return markers + + +def setupVisualization(dataset, args, selected_collection_key): + """ + Creates the necessary variables in a dictionary "dataset_graphics", which will be passed onto the visualization + function + """ + + # Create a python dictionary that will contain all the visualization related information + graphics = {'colormap': {}, 'labels': {}, 'ros': {}, 'sensors': {}, 'args': args} + + # Parse xacro description file + description_file, _, _ = uriReader( + dataset['calibration_config']['description_file']) + rospy.loginfo('Reading description file ' + description_file + '...') + # TODO not sure this should be 
done because of the use_tfs functionality ... + xml_robot = readXacroFile(description_file) + + # Initialize ROS stuff + rospy.init_node("odometry_inspector") + # graphics['ros']['tf_broadcaster'] = tf.TransformBroadcaster() + graphics['ros']['tf_broadcaster'] = tf2_ros.TransformBroadcaster() + + # Sleep a litle to make sure the time.now() returns a correct time. + # rospy.sleep(0.2) + now = rospy.Time.now() + + graphics['ros']['publisher_models'] = rospy.Publisher( + '~robot_meshes', MarkerArray, queue_size=0, latch=True) + # Analyse xacro and figure out which transforms are static (always the same over the optimization), and which are + # not. For fixed we will use a static transform publisher. + # for collection_key, collection in dataset['collections'].items(): + # for transform_key, transform in collection['transforms'].items(): + # parent = transform['parent'] + # child = transform['child'] + # + # + # for sensor_key, sensor in dataset['sensors'].items(): # is the transformation being optimized? + # if joint.parent == sensor['calibration_parent'] and joint.child == sensor['calibration_child']: + # + # + # + # for joint in xml_robot.joints: + # if joint.parent == parent and joint.child == child: + # print(transform) + # print(joint) + # + # + # + # if joint.type == 'fixed': + # transform['fixed'] = True + # else: + # transform['fixed'] = False + + # Create colormaps to be used for coloring the elements. Each collection-pattern pair contains a color, + # each sensor-pattern pair contains a color likewise. + for pattern_key, pattern in dataset['calibration_config']['calibration_patterns'].items(): + # TODO change colormap between each pattern + graphics['colormap'][pattern_key] = {} + graphics['colormap'][pattern_key]['rgb'] = cm.gist_rainbow( + np.linspace(0, 1, pattern['dimension']['x'] * pattern['dimension']['y'])) + + graphics['colormap']['range'] = {} + # TODO change colormap between each pattern + graphics['colormap']['range']['full_colormap'] = cm.Pastel2( + np.linspace(0, 1, len(dataset['collections'].keys()))) + for idx, collection_key in enumerate(sorted(dataset['collections'].keys())): + graphics['colormap']['range'][collection_key] = { + 'color': graphics['colormap']['range']['full_colormap'][idx, :]} + + # Initialize the graphics['labels'] dictionary + for pattern_key, pattern in dataset['calibration_config']['calibration_patterns'].items(): + graphics['labels'][pattern_key] = {} + for sensor_key, sensor in dataset['sensors'].items(): + graphics['labels'][pattern_key][sensor_key] = {'collections': {}} + graphics['sensors'][sensor_key] = {'collections': {}} + for collection_key, collection in dataset['collections'].items(): + graphics['labels'][pattern_key][sensor_key]['collections'][collection_key] = {} + graphics['sensors'][sensor_key]['collections'][collection_key] = {} + + # ---------------------------------------------------------------------------------------- + # Create 2D Labels (only for rgb and depth) + # Note: Republish a new image at every visualization, because the labels must redrawn as they change position + # ---------------------------------------------------------------------------------------- + for sensor_key, sensor in dataset['sensors'].items(): + if sensor['modality'] in ['rgb', 'depth']: + # graphics['sensors'][sensor_key] = {} + + # Image msg setup. 
+ labeled_topic = generateLabeledTopic(dataset['sensors'][sensor_key]['topic'], type='2d') + graphics['sensors'][sensor_key]['publisher'] = rospy.Publisher( + labeled_topic, sensor_msgs.msg.Image, queue_size=0, latch=True) + + # Camera info msg setup. + labeled_topic = generateLabeledTopic( + dataset['sensors'][sensor_key]['topic'], type='2d') + '/camera_info' + graphics['sensors'][sensor_key]['publisher_camera_info'] = rospy.Publisher( + labeled_topic, sensor_msgs.msg.CameraInfo, queue_size=0, latch=True) + + # ---------------------------------------------------------------------------------------- + # Create 3D Labels (only for reference sensor lidar3d or depth) + # Note: Republish new labeled point clouds because the labels must be redrawn as they change position + # ---------------------------------------------------------------------------------------- + graphics['PubPointCloudReference'] = dict() + for sensor_key, sensor in dataset['sensors'].items(): + # print('Sensor ' + sensor_key) + + if not sensor_key == args['reference_sensor']: + continue + + if sensor['modality'] not in ['lidar2d', 'lidar3d', 'depth']: + continue + + markers = MarkerArray() + + for collection_key, collection in dataset['collections'].items(): + + if sensor['modality'] == 'lidar3d': + # print('Entering modality 3d for sensor ' + sensor_key + ' collection ' + collection_key) + # Add labelled points to the marker + frame_id = genCollectionPrefix( + collection_key, collection['data'][sensor_key]['header']['frame_id']) + + # Add 3D lidar data + original_pointcloud_msg = getPointCloudMessageFromDictionary( + dataset['collections'][collection_key]['data'][sensor_key]) + + final_pointcloud_msg = PointCloud2( + header=Header(frame_id=frame_id, stamp=now), + height=original_pointcloud_msg.height, width=original_pointcloud_msg.width, + fields=original_pointcloud_msg.fields, + is_bigendian=original_pointcloud_msg.is_bigendian, + point_step=original_pointcloud_msg.point_step, + row_step=original_pointcloud_msg.row_step, data=original_pointcloud_msg.data, + is_dense=original_pointcloud_msg.is_dense) + + topic = generateLabeledTopic(dataset['sensors'][sensor_key]['topic'], type='3d') + topic += '_reference' + graphics['sensors'][sensor_key]['PubPointCloudReference'] = rospy.Publisher( + topic, PointCloud2, queue_size=0, latch=True) + graphics['sensors'][sensor_key]['collections'][collection_key][ + 'PointCloudsReference'] = final_pointcloud_msg + + # ---------------------------------------------------------------------------------------- + # Create 3D Labels (only for lidar3d and depth) + # Note: Republish new labeled point clouds because the labels must be redrawn as they change position + # ---------------------------------------------------------------------------------------- + + graphics['PubPointCloud'] = dict() + for sensor_key, sensor in dataset['sensors'].items(): + # print('Sensor ' + sensor_key) + + if sensor['modality'] not in ['lidar2d', 'lidar3d', 'depth']: + continue + + markers = MarkerArray() + + for collection_key, collection in dataset['collections'].items(): + # print('Collection ' + collection_key) + + # Removed because of https://github.com/lardemua/atom/issues/515 + # if not collection['labels'][str(sensor_key)]['detected']: # not detected by sensor in collection + # continue + + # when the sensor has no label, the 'idxs_limit_points' does not exist!!! + # TODO this is not correct I think ... 
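+            # (the block below silently creates an empty 'idxs_limit_points'
+            # and marks a label as detected whenever its 'idxs' is non-empty)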
+ for pattern_key, pattern in dataset['calibration_config']['calibration_patterns'].items(): + if 'idxs_limit_points' not in collection['labels'][pattern_key][str(sensor_key)]: + collection['labels'][pattern_key][str(sensor_key)]['idxs_limit_points'] = [] + + if collection['labels'][pattern_key][str(sensor_key)]['idxs'] != []: + collection['labels'][pattern_key][str(sensor_key)]['detected'] = True + + # if sensor['msg_type'] == 'LaserScan': # -------- Publish the laser scan data ------------------------------ + if sensor['modality'] == 'lidar2d': + frame_id = genCollectionPrefix( + collection_key, collection['data'][sensor_key]['header']['frame_id']) + marker = Marker( + header=Header(frame_id=frame_id, stamp=now), + ns=str(collection_key) + '-' + str(sensor_key), + id=0, frame_locked=True, type=Marker.POINTS, action=Marker.ADD, + lifetime=rospy.Duration(0), + pose=Pose( + position=Point(x=0, y=0, z=0), + orientation=Quaternion(x=0, y=0, z=0, w=1)), + scale=Vector3(x=0.03, y=0.03, z=0), + color=ColorRGBA( + r=graphics['colormap']['range'][collection_key]['color'][0], + g=graphics['colormap']['range'][collection_key]['color'][1], + b=graphics['colormap']['range'][collection_key]['color'][2], + a=1.0)) + + # Get laser points that belong to the chessboard (labelled) + idxs = collection['labels'][pattern_key][sensor_key]['idxs'] + rhos = [collection['data'][sensor_key]['ranges'][idx] + for idx in idxs] + thetas = [collection['data'][sensor_key]['angle_min'] + + collection['data'][sensor_key]['angle_increment'] * idx for idx in idxs] + + for idx, (rho, theta) in enumerate(zip(rhos, thetas)): + marker.points.append( + Point(x=rho * math.cos(theta), y=rho * math.sin(theta), z=0)) + + markers.markers.append(copy.deepcopy(marker)) + + # Draw extrema points + marker.ns = str(collection_key) + '-' + str(sensor_key) + marker.type = Marker.SPHERE_LIST + marker.id = 1 + marker.scale.x = 0.1 + marker.scale.y = 0.1 + marker.scale.z = 0.1 + marker.color.a = 0.5 + marker.points = [marker.points[0], marker.points[-1]] + + markers.markers.append(copy.deepcopy(marker)) + + # Draw detected edges + marker.ns = str(collection_key) + '-' + str(sensor_key) + marker.type = Marker.CUBE_LIST + marker.id = 2 + marker.scale.x = 0.05 + marker.scale.y = 0.05 + marker.scale.z = 0.05 + marker.color.a = 0.5 + + marker.points = [] # Reset the list of marker points + # add edge points + for pattern_key in dataset['calibration_config']['calibration_patterns'].keys(): + for edge_idx in collection['labels'][pattern_key][sensor_key]['edge_idxs']: + p = Point() + p.x = rhos[edge_idx] * math.cos(thetas[edge_idx]) + p.y = rhos[edge_idx] * math.sin(thetas[edge_idx]) + p.z = 0 + marker.points.append(p) + markers.markers.append(copy.deepcopy(marker)) + + # if sensor['msg_type'] == 'PointCloud2': # -------- Publish the velodyne data ------------------------------ + elif sensor['modality'] == 'lidar3d': + + # print('Entering modality 3d for sensor ' + sensor_key + ' collection ' + collection_key) + # Add labelled points to the marker + frame_id = genCollectionPrefix( + collection_key, collection['data'][sensor_key]['header']['frame_id']) + + # Add 3D lidar data + original_pointcloud_msg = getPointCloudMessageFromDictionary( + dataset['collections'][collection_key]['data'][sensor_key]) + + # print('original_pointcloud_msg.width = ' + str(original_pointcloud_msg.width)) + # print('original_pointcloud_msg.height = ' + str(original_pointcloud_msg.height)) + + # exit(0) + final_pointcloud_msg = PointCloud2( + header=Header(frame_id=frame_id, 
stamp=now), + height=original_pointcloud_msg.height, width=original_pointcloud_msg.width, + fields=original_pointcloud_msg.fields, + is_bigendian=original_pointcloud_msg.is_bigendian, + point_step=original_pointcloud_msg.point_step, + row_step=original_pointcloud_msg.row_step, data=original_pointcloud_msg.data, + is_dense=original_pointcloud_msg.is_dense) + + labeled_topic = generateLabeledTopic( + dataset['sensors'][sensor_key]['topic'], type='3d') + graphics['sensors'][sensor_key]['PubPointCloud'] = rospy.Publisher( + labeled_topic, PointCloud2, queue_size=0, latch=True) + graphics['sensors'][sensor_key]['collections'][collection_key]['PointClouds'] = final_pointcloud_msg + + # 3D labels for depth sensors ------------------------------------------------------------------------ + # sensor_key + '-Labels-3D' + print('Creating 3D labels for depth sensors ...') + for sensor_key, sensor in dataset['sensors'].items(): + if sensor['modality'] not in ['depth']: # only depth sensors + continue + + # if not collection['labels'][sensor_key]['detected']: # not detected by sensor in collection + # continue + + # Create a single publisher for the sensor (will use this one for all collections) + labeled_topic = generateLabeledTopic(dataset['sensors'][sensor_key]['topic'], type='3d') + graphics['sensors'][sensor_key]['PubDepthMarkers'] = rospy.Publisher( + labeled_topic, MarkerArray, queue_size=0, latch=True) + + # ----------------------------------------------------------------------------------------------------- + # -------- Robot meshes + # ----------------------------------------------------------------------------------------------------- + graphics['ros']['robot_mesh_markers'] = {'collections': {}} + for collection_key, collection in dataset['collections'].items(): + rgba = [.5, .5, .5, 1] # best color we could find + markers = urdfToMarkerArray(xml_robot, frame_id_prefix=genCollectionPrefix( + collection_key, ''), namespace='robot_mesh', rgba=rgba) + graphics['ros']['robot_mesh_markers']['collections'][collection_key] = markers + + # Create LaserBeams Publisher ----------------------------------------------------------- + # This one is recomputed every time in the objective function, so just create the generic properties. 
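+    # One LINE_LIST marker is created below per (collection, pattern, lidar
+    # sensor) triplet; only the generic properties (frame, color, scale) are
+    # set here, since the actual beam endpoints are filled in later by the
+    # objective function.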
+    markers = MarkerArray()
+
+    for collection_key, collection in dataset['collections'].items():
+        for pattern_key, pattern in dataset['calibration_config']['calibration_patterns'].items():
+            for sensor_key, sensor in dataset['sensors'].items():
+                # pattern not detected by sensor in this collection
+                if not collection['labels'][pattern_key][sensor_key]['detected']:
+                    continue
+                # if sensor['msg_type'] == 'LaserScan' or sensor['msg_type'] == 'PointCloud2':
+                if sensor['modality'] == 'lidar2d' or sensor['modality'] == 'lidar3d':
+                    frame_id = genCollectionPrefix(
+                        collection_key, collection['data'][sensor_key]['header']['frame_id'])
+                    marker = Marker(
+                        header=Header(frame_id=frame_id, stamp=rospy.Time.now()),
+                        ns=str(collection_key) + '-' + str(sensor_key) + '-' + str(pattern_key),
+                        id=0, frame_locked=True, type=Marker.LINE_LIST, action=Marker.ADD,
+                        lifetime=rospy.Duration(0),
+                        pose=Pose(
+                            position=Point(x=0, y=0, z=0),
+                            orientation=Quaternion(x=0, y=0, z=0, w=1)),
+                        scale=Vector3(x=0.001, y=0, z=0),
+                        color=ColorRGBA(
+                            r=graphics['colormap']['range'][collection_key]['color'][0],
+                            g=graphics['colormap']['range'][collection_key]['color'][1],
+                            b=graphics['colormap']['range'][collection_key]['color'][2],
+                            a=1.0))
+                    markers.markers.append(marker)
+
+    graphics['ros']['MarkersLaserBeams'] = markers
+    graphics['ros']['PubLaserBeams'] = rospy.Publisher(
+        '~LaserBeams', MarkerArray, queue_size=0, latch=True)
+
+    # Create Miscellaneous MarkerArray -----------------------------------------------------------
+    markers = MarkerArray()
+
+    # Text signaling the anchored sensor
+    for _sensor_key, sensor in dataset['sensors'].items():
+        if _sensor_key == dataset['calibration_config']['anchored_sensor']:
+            marker = Marker(
+                header=Header(frame_id=str(_sensor_key),
+                              stamp=now),
+                ns=str(_sensor_key),
+                id=0, frame_locked=True, type=Marker.TEXT_VIEW_FACING, action=Marker.ADD,
+                lifetime=rospy.Duration(0),
+                pose=Pose(
+                    position=Point(x=0, y=0, z=0.2),
+                    orientation=Quaternion(x=0, y=0, z=0, w=1)),
+                scale=Vector3(x=0.0, y=0.0, z=0.1),
+                color=ColorRGBA(r=0.6, g=0.6, b=0.6, a=1.0),
+                text='Anchored')
+
+            markers.markers.append(marker)
+
+    graphics['ros']['MarkersMiscellaneous'] = markers
+    graphics['ros']['PubMiscellaneous'] = rospy.Publisher(
+        '~Miscellaneous', MarkerArray, queue_size=0, latch=True)
+    # Publish only once in latched mode
+    graphics['ros']['PubMiscellaneous'].publish(
+        graphics['ros']['MarkersMiscellaneous'])
+
+    graphics['ros']['Rate'] = rospy.Rate(10)
+    # tfs need to be published at high frequencies; robot markers, on the other hand,
+    # should be published at low frequencies. This counter controls that mechanism.
+    graphics['ros']['Counter'] = 0
+
+    return graphics
+
+
+def visualizationFunction(models, selection, clicked_points=None):
+    # print(Fore.RED + 'Visualization function called.' + Style.RESET_ALL)
+    # Get the data from the models
+    dataset = models['dataset']
+    args = models['args']
+    collections = models['dataset']['collections']
+    sensors = models['dataset']['sensors']
+    config = models['dataset']['calibration_config']
+    graphics = models['graphics']
+
+    selected_collection_key = selection['collection_key']
+    previous_selected_collection_key = selection['previous_collection_key']
+    reference_collection_key = args['reference_collection']
+
+    collection = dataset['collections'][selected_collection_key]
+
+    # print("args['initial_pose_ghost'])" + str(args['initial_pose_ghost']))
+
+    now = rospy.Time.now()  # time used to publish all visualization messages
+
+    transforms = []
+    for collection_key, collection in collections.items():
+
+        # To have a fully connected tree, we must connect the tf tree instances of every collection
+        # into a single tree. We do this by publishing an identity transform between the configured
+        # world link and the world link of each collection.
+        parent = config['world_link']
+        child = generateName(config['world_link'], prefix='c' + collection_key)
+
+        transform = TransformStamped(header=Header(frame_id=parent, stamp=now),
+                                     child_frame_id=child,
+                                     transform=Transform(translation=Vector3(x=0, y=0, z=0),
+                                                         rotation=Quaternion(x=0, y=0, z=0, w=1)))
+        transforms.append(transform)
+
+        for transform_key, transform in collection['transforms'].items():
+            parent = generateName(
+                transform['parent'], prefix='c' + collection_key)
+            child = generateName(
+                transform['child'], prefix='c' + collection_key)
+            x, y, z = transform['trans']
+            qx, qy, qz, qw = transform['quat']
+            transform = TransformStamped(
+                header=Header(frame_id=parent, stamp=now),
+                child_frame_id=child,
+                transform=Transform(
+                    translation=Vector3(x=x, y=y, z=z),
+                    rotation=Quaternion(x=qx, y=qy, z=qz, w=qw)))
+            transforms.append(transform)
+
+    graphics['ros']['tf_broadcaster'].sendTransform(transforms)
+
+    # print("graphics['ros']['Counter'] = " + str(graphics['ros']['Counter']))
+    if graphics['ros']['Counter'] < 5:
+        graphics['ros']['Counter'] += 1
+        return None
+    else:
+        graphics['ros']['Counter'] = 0
+
+    # Update markers stamp, so that rviz uses newer transforms to compute their poses.
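+    # From here on the function runs at most once every six calls: the
+    # transforms above are broadcast on every call, while the heavier marker,
+    # point cloud and image publications below are throttled by the counter.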
+    # for marker in graphics['ros']['robot_mesh_markers'].markers:
+    #     marker.header.stamp = now
+
+    # Publish the meshes
+    markers = graphics['ros']['robot_mesh_markers']['collections'][selected_collection_key]
+    for marker in markers.markers:
+        marker.header.stamp = now
+
+    graphics['ros']['publisher_models'].publish(markers)
+
+    # Update timestamp for labeled markers
+    # for sensor_key in graphics['ros']['sensors']:
+    #     if not dataset['sensors'][sensor_key]['modality'] in ['lidar3d']:  # TODO add depth and lidar2d
+    #         continue
+    #
+    #     for marker in graphics['ros']['sensors'][sensor_key]['collections'][selected_collection_key][
+    #             'MarkersLabeled'].markers:
+    #         marker.header.stamp = now
+
+    # Update timestamp for laser beams markers
+    for marker in graphics['ros']['MarkersLaserBeams'].markers:
+        marker.header.stamp = now
+
+    # Publish 3d markers for depth sensors -------------------------------------------------------------------
+    for pattern_key in dataset['calibration_config']['calibration_patterns'].keys():
+        for sensor_key in graphics['labels'][pattern_key]:
+            if not dataset['sensors'][sensor_key]['modality'] in ['depth']:  # only for depth sensors
+                continue
+            # print("Creating markers")
+            # print(sensor_key)
+
+            # if not dataset['collections'][selected_collection_key]['labels'][sensor_key]['detected']:
+            #     continue
+            #
+            # if not collection['labels'][sensor_key]['detected']:  # not detected by sensor in collection
+            #     continue
+
+            color = (graphics['colormap']['range'][selected_collection_key]['color'][0],
+                     graphics['colormap']['range'][selected_collection_key]['color'][1],
+                     graphics['colormap']['range'][selected_collection_key]['color'][2], 0.5)
+            markers = createDepthMarkers3D(
+                dataset, sensor_key, selected_collection_key, pattern_key, color=color, stamp=None,
+                cached=False, frame_id=None, namespace=sensor_key)
+
+            graphics['sensors'][sensor_key]['PubDepthMarkers'].publish(markers)
+
+    # Update timestamp and colors for the reference pointcloud2 message
+    for sensor_key in graphics['sensors']:
+
+        if not sensor_key == args['reference_sensor']:
+            continue
+
+        if not dataset['sensors'][sensor_key]['modality'] in ['lidar3d']:  # TODO add lidar2d
+            continue
+
+        # Create a new point cloud to publish, colorized in blue to mark it as the reference.
+        # Also, fill the idx and sensor_idx channels from the labels of the sensor.
+        pointcloud_msg = graphics['sensors'][sensor_key]['collections'][reference_collection_key]['PointCloudsReference']
+        pointcloud_msg.header.stamp = now  # update time stamp
+
+        points = []  # Build a list of points.
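+        # The 'rgb' PointField holds a single 32-bit value with the channels
+        # packed as 0xAARRGGBB; struct.pack('BBBB', b, g, r, a) followed by
+        # struct.unpack('I', ...) produces that layout on a little-endian
+        # machine, e.g. (b, g, r, a) = (200, 0, 0, 255) -> 0xFF0000C8.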
+ for idx, point in enumerate(list(pc2.read_points(pointcloud_msg))): + + # colorize reference point cloud in blue + r, g, b = 0, 0, 200 + rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, 255))[0] + + sensor_idx = list(dataset['collections'][reference_collection_key] + ['labels'][pattern_key].keys()).index(sensor_key) + + point_color = [point[0], point[1], point[2], idx, rgb, sensor_idx] + points.append(point_color) + + fields = [PointField('x', 0, PointField.FLOAT32, 1), + PointField('y', 4, PointField.FLOAT32, 1), + PointField('z', 8, PointField.FLOAT32, 1), + PointField('idx', 12, PointField.FLOAT32, 1), + PointField('rgb', 20, PointField.UINT32, 1), + PointField('sensor_idx', 24, PointField.FLOAT32, 1)] + pointcloud_msg_labeled = pc2.create_cloud(pointcloud_msg.header, fields, points) + + graphics['sensors'][sensor_key]['PubPointCloudReference'].publish(pointcloud_msg_labeled) + + # Update timestamp and colors for pointcloud2 message + for sensor_key in graphics['sensors']: + if not dataset['sensors'][sensor_key]['modality'] in ['lidar3d']: # TODO add lidar2d + continue + + # Create a new point cloud to publish which has the new label idxs in green, idxs_limits in dark green. + # Also, change intensity channel according to the idxs of the sensor + pointcloud_msg = graphics['sensors'][sensor_key]['collections'][selected_collection_key]['PointClouds'] + pointcloud_msg.header.stamp = now # update time stamp. + + points = [] # Build a list of points. + for pattern_key in dataset['calibration_config']['calibration_patterns'].keys(): + idxs = dataset['collections'][selected_collection_key]['labels'][pattern_key][ + sensor_key]['idxs'] + idxs_limit_points = dataset['collections'][selected_collection_key]['labels'][ + pattern_key][sensor_key]['idxs_limit_points'] + sensor_idx = list(dataset['collections'][selected_collection_key] + ['labels'][pattern_key].keys()).index(sensor_key) + + # print('pointcloud_msg.height=' + str(pointcloud_msg.height)) + # print('pointcloud_msg.width=' + str(pointcloud_msg.width)) + + for idx, point in enumerate(list(pc2.read_points(pointcloud_msg))): + + # Colorize points according to if they are labels or not + if idx in idxs_limit_points: # Dark green. + r, g, b = 50, 70, 20 + elif idx in idxs: # Green. + r, g, b = 100, 220, 20 + else: # Gray. 
+                        r, g, b = 186, 189, 182
+
+                    rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, 255))[0]
+
+                    point_color = [point[0], point[1], point[2], idx, rgb, sensor_idx]
+                    points.append(point_color)
+
+        fields = [PointField('x', 0, PointField.FLOAT32, 1),
+                  PointField('y', 4, PointField.FLOAT32, 1),
+                  PointField('z', 8, PointField.FLOAT32, 1),
+                  PointField('idx', 12, PointField.FLOAT32, 1),
+                  PointField('rgb', 20, PointField.UINT32, 1),
+                  PointField('sensor_idx', 24, PointField.FLOAT32, 1)]
+        pointcloud_msg_labeled = pc2.create_cloud(pointcloud_msg.header, fields, points)
+
+        graphics['sensors'][sensor_key]['PubPointCloud'].publish(pointcloud_msg_labeled)
+
+    # Create a new marker array which contains only the laser beam markers of the selected
+    # collection (and, when the selection changed, DELETE actions for the previous one), and publish it.
+    marker_array_3 = MarkerArray()
+    for marker in graphics['ros']['MarkersLaserBeams'].markers:
+        if marker.header.frame_id.startswith('c' + str(selected_collection_key) + '_'):
+            marker_array_3.markers.append(marker)
+            marker_array_3.markers[-1].action = Marker.ADD
+        elif not previous_selected_collection_key == selected_collection_key and \
+                marker.header.frame_id.startswith('c' + str(previous_selected_collection_key) + '_'):
+            marker_array_3.markers.append(marker)
+            marker_array_3.markers[-1].action = Marker.DELETE
+
+    graphics['ros']['PubLaserBeams'].publish(marker_array_3)
+
+    # Publish Annotated images
+    for sensor_key, sensor in sensors.items():
+        # if sensor['msg_type'] == 'Image':
+        if sensor['modality'] == 'rgb':
+            if args['show_images']:
+                collection = collections[selected_collection_key]
+                image = copy.deepcopy(getCvImageFromCollectionSensor(
+                    selected_collection_key, sensor_key, dataset))
+                width = collection['data'][sensor_key]['width']
+                height = collection['data'][sensor_key]['height']
+                diagonal = math.sqrt(width ** 2 + height ** 2)
+                for pattern_key in dataset['calibration_config']['calibration_patterns'].keys():
+                    cm = graphics['colormap'][pattern_key]['rgb']
+
+                    # Draw projected points (as dots)
+                    # for idx, point in enumerate(collection['labels'][sensor_key]['idxs_projected']):
+                    #     x = int(round(point['x']))
+                    #     y = int(round(point['y']))
+                    #     color = (cm[idx, 2] * 255, cm[idx, 1] * 255, cm[idx, 0] * 255)
+                    #     cv2.line(image, (x, y), (x, y), color, int(6E-3 * diagonal))
+
+                    # Draw labeled points (as squares)
+                    for idx, point in enumerate(
+                            collection['labels'][pattern_key][sensor_key]['idxs']):
+                        x = int(round(point['x']))
+                        y = int(round(point['y']))
+                        color = (cm[idx, 2] * 255, cm[idx, 1]
+                                 * 255, cm[idx, 0] * 255)
+                        drawSquare2D(image, x, y, int(8E-3 * diagonal),
+                                     color=color, thickness=2)
+
+                    # Draw initial projected points (as crosses)
+                    # for idx, point in enumerate(collection['labels'][sensor_key]['idxs_initial']):
+                    #     x = int(round(point['x']))
+                    #     y = int(round(point['y']))
+                    #     color = (cm[idx, 2] * 255, cm[idx, 1] * 255, cm[idx, 0] * 255)
+                    #     drawCross2D(image, x, y, int(8E-3 * diagonal), color=color, thickness=1)
+
+                msg = CvBridge().cv2_to_imgmsg(image, "bgr8")
+
+                msg.header.frame_id = 'c' + \
+                    selected_collection_key + '_' + sensor['parent']
+                graphics['sensors'][sensor_key]['publisher'].publish(msg)
+
+                # Publish camera info message
+                camera_info_msg = message_converter.convert_dictionary_to_ros_message(
+                    'sensor_msgs/CameraInfo', sensor['camera_info'])
+                camera_info_msg.header.frame_id = msg.header.frame_id
+                graphics['sensors'][sensor_key]['publisher_camera_info'].publish(
+                    camera_info_msg)
+
+        if sensor['modality'] == 'depth':
+            if args['show_images']:
+
+                # 
Shortcut variables + collection = collections[selected_collection_key] + clicked_sensor_points = clicked_points[selected_collection_key][sensor_key][ + 'points'] + + # Create image to draw on top + image = getCvImageFromDictionaryDepth(collection['data'][sensor_key]) + gui_image = normalizeDepthImage(image, max_value=5) + for pattern_key in dataset['calibration_config']['calibration_patterns'].keys(): + gui_image = drawLabelsOnImage( + collection['labels'][pattern_key][sensor_key], gui_image) + + if not clicked_points[selected_collection_key][sensor_key]['valid_polygon']: + # Draw a cross for each point + for point in clicked_sensor_points: + drawSquare2D( + gui_image, point['x'], + point['y'], + size=7, color=(50, 190, 0), + thickness=2) + + # Draw a line segment for each pair of consecutive points + for point_start, point_end in zip( + clicked_sensor_points[: -1], + clicked_sensor_points[1:]): + cv2.line(gui_image, pt1=(point_start['x'], point_start['y']), + pt2=(point_end['x'], point_end['y']), + color=(0, 0, 255), thickness=2) + + msg = CvBridge().cv2_to_imgmsg(gui_image, "passthrough") + + msg.header.frame_id = 'c' + selected_collection_key + '_' + sensor['parent'] + graphics['sensors'][sensor_key]['publisher'].publish(msg) + + # Publish camera info message + camera_info_msg = message_converter.convert_dictionary_to_ros_message( + 'sensor_msgs/CameraInfo', sensor['camera_info']) + camera_info_msg.header.frame_id = msg.header.frame_id + graphics['sensors'][sensor_key]['publisher_camera_info'].publish( + camera_info_msg) + + # elif sensor['msg_type'] == 'LaserScan': + elif sensor['modality'] == 'lidar2d': + pass + # elif sensor['msg_type'] == 'PointCloud2': + elif sensor['modality'] == 'lidar3d': + pass + else: + pass + + graphics['ros']['Rate'].sleep() From e8d879d1c6df6bd0744d7a8cb1ad30d889ee6825 Mon Sep 17 00:00:00 2001 From: Miguel Oliveira Date: Sat, 13 Jul 2024 18:16:08 +0100 Subject: [PATCH 3/3] #967 functional. Manual only for now ... --- atom_calibration/scripts/odometry_inspector | 37 +- .../odometry_inspector/additional_tf.py | 392 ++++++++++++++++++ .../interactive_first_guess.py | 171 ++++++++ .../odometry_inspector/transformation_t.py | 100 +++++ 4 files changed, 690 insertions(+), 10 deletions(-) create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/additional_tf.py create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/interactive_first_guess.py create mode 100644 atom_calibration/src/atom_calibration/odometry_inspector/transformation_t.py diff --git a/atom_calibration/scripts/odometry_inspector b/atom_calibration/scripts/odometry_inspector index ab2cf31b..3fd5feac 100644 --- a/atom_calibration/scripts/odometry_inspector +++ b/atom_calibration/scripts/odometry_inspector @@ -25,6 +25,8 @@ from atom_calibration.odometry_inspector.visualization import setupVisualization from atom_core.dataset_io import (filterCollectionsFromDataset, filterSensorsFromDataset, loadResultsJSON, saveAtomDataset) +from atom_calibration.odometry_inspector.interactive_first_guess import InteractiveFirstGuess + # own packages @@ -141,12 +143,16 @@ def main(): ap.add_argument( "-rs", "--reference_sensor", help="", required=True) - ap.add_argument( - "-csf", "--collection_selection_function", default=None, type=str, - help="A string to be evaluated into a lambda function that receives a collection name as input and " - "returns True or False to indicate if the collection should be loaded (and used in the " - "optimization). 
The Syntax is lambda name: f(x), where f(x) is the function in python " - "language. Example: lambda name: int(name) > 5 , to load only collections 6, 7, and onward.") + ap.add_argument("-op", "--odometry_parent", help="", required=True) + ap.add_argument("-oc", "--odometry_child", help="", required=True) + ap.add_argument("-s", "--marker_scale", type=float, default=0.9, + help='Scale of the interactive markers.') + # ap.add_argument( + # "-csf", "--collection_selection_function", default=None, type=str, + # help="A string to be evaluated into a lambda function that receives a collection name as input and " + # "returns True or False to indicate if the collection should be loaded (and used in the " + # "optimization). The Syntax is lambda name: f(x), where f(x) is the function in python " + # "language. Example: lambda name: int(name) > 5 , to load only collections 6, 7, and onward.") # Roslaunch adds two arguments (__name and __log) that break our parser. Lets remove those. arglist = [x for x in sys.argv[1:] if not x.startswith('__')] @@ -159,7 +165,7 @@ def main(): args['show_images'] = True args['use_incomplete_collections'] = True args['remove_partial_detections'] = False - # args['collection_selection_function'] = None + args['collection_selection_function'] = None # --------------------------------------- # --- INITIALIZATION Read data from file @@ -181,9 +187,8 @@ def main(): dataset = filterSensorsFromDataset(dataset, args) # filter sensors - print( - 'Loaded dataset containing ' + str(len(dataset['sensors'].keys())) + ' sensors and ' + - str(len(dataset['collections'].keys())) + ' collections.') + print('Loaded dataset containing ' + str(len(dataset['sensors'].keys())) + ' sensors and ' + + str(len(dataset['collections'].keys())) + ' collections.') depth_mode = {'mode': 'detect'} @@ -256,6 +261,12 @@ def main(): depth_mode=depth_mode, output_file=output_file)) listener.start() + # --------------------------------------- + # Setting up interactive marker + # --------------------------------------- + first_guess = InteractiveFirstGuess(args, dataset, selection) + first_guess.init() + # --------------------------------------- # --- Loop while displaying selected collection # --------------------------------------- @@ -263,8 +274,14 @@ def main(): rate = rospy.Rate(10) # in hertz. 
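+    # The sentinel below never matches a real collection key, forcing the
+    # interactive marker to be synchronized on the very first loop iteration.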
     tic = rospy.Time.now()
+    current_collection = 'this will not be a collection key'
     while not rospy.is_shutdown() and not selection['exit']:
         models = {'dataset': dataset, 'args': args, 'graphics': graphics}
+        if selection['collection_key'] != current_collection:
+            first_guess.additional_tf.updateAll()
+            first_guess.additional_tf.updateMarkers()
+            current_collection = selection['collection_key']
+
         visualizationFunction(models=models, selection=selection, clicked_points=clicked_points)
         rate.sleep()
diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/additional_tf.py b/atom_calibration/src/atom_calibration/odometry_inspector/additional_tf.py
new file mode 100644
index 00000000..02939e45
--- /dev/null
+++ b/atom_calibration/src/atom_calibration/odometry_inspector/additional_tf.py
@@ -0,0 +1,392 @@
+#!/usr/bin/env python
+
+# stdlib
+import copy
+import os
+
+# 3rd-party
+from atom_core.naming import generateKey
+import atom_msgs.srv
+import colorama
+import numpy as np
+import cv2
+import std_srvs.srv
+import tf
+import image_geometry
+from atom_calibration.collect.label_messages import getFrustumMarkerArray
+from interactive_markers.menu_handler import *
+from visualization_msgs.msg import *
+from tf.listener import TransformListener
+from rospy_message_converter import message_converter
+
+# from transformation_t import TransformationT
+from atom_calibration.initial_estimate.transformation_t import TransformationT
+
+# ------------------------
+# BASE CLASSES #
+# ------------------------
+
+
+class MarkerPoseC:
+    def __init__(self, position, orientation, frame_id, child_frame_id):
+        self.position = position
+        self.orientation = orientation
+        self.frame_id = frame_id
+        self.child_frame_id = child_frame_id
+
+    def __str__(self):
+        return str(self.position) + "\n" + str(self.orientation)
+
+    __repr__ = __str__
+
+
+class Additional_tf:
+
+    def __init__(self, name, server, global_menu_handler, frame_world, frame_opt_parent,
+                 frame_opt_child, frame_additional_tf, additional_tf_color, marker_scale,
+                 selection, dataset, listener=None):
+        print('Creating a new additional_tf named ' + name)
+        self.name = name
+        self.visible = True
+        self.server = server
+        self.selection = selection
+        self.dataset = dataset
+
+        # self.global_menu_handler = global_menu_handler
+
+        self.menu_handler = MenuHandler()
+        if listener is not None:
+            self.listener = listener
+        else:
+            self.listener = TransformListener()
+        self.br = tf.TransformBroadcaster()
+        self.marker_scale = marker_scale
+        # transforms
+        self.world_link = frame_world
+        self.opt_parent_link = frame_opt_parent
+        self.opt_child_link = frame_opt_child
+        self.additional_tfs_link = frame_additional_tf
+
+        # Frustum
+        self.color = additional_tf_color
+
+        self.updateAll()  # update all the transformations
+        # print('preT:\n' + str(self.preT))
+        # print('optT:\n' + str(self.optT))
+        # print('posT:\n' + str(self.posT))
+
+        self.optTInitial = copy.deepcopy(self.optT)  # saved so resetToInitalPose can restore it
+        self.createInteractiveMarker()  # create interactive marker
+        print('Created interactive marker.')
+
+        # Add service to make visible / invisible and set the scale
+        self.service_set_visible = rospy.Service('~' + self.name +
+                                                 '/set_additional_tfs_interactive_marker',
+                                                 atom_msgs.srv.SetAdditionalTfsInteractiveMarker,
+                                                 self.callbackSetAdditionalTfsInteractiveMarker)
+
+        # Add service to get the visible and scale
+        self.service_get_visible = rospy.Service('~' + self.name +
+                                                 '/get_additional_tfs_interactive_marker',
+                                                 atom_msgs.srv.GetAdditionalTfsInteractiveMarker,
+                                                 self.callbackGetAdditionalTfsInteractiveMarker)
+
+        # Start publishing now
+        self.timer_callback = rospy.Timer(
+            rospy.Duration(.1),
+            self.publishTFCallback)  # to periodically broadcast
+
+    def callbackGetAdditionalTfsInteractiveMarker(self, request):
+
+        interactive_marker = self.server.get(self.name)
+        response = atom_msgs.srv.GetAdditionalTfsInteractiveMarkerResponse()
+        response.visible = self.visible
+        response.scale = interactive_marker.scale
+        return response
+
+    def callbackSetAdditionalTfsInteractiveMarker(self, request):
+
+        print('callbackSetAdditionalTfsInteractiveMarker service requested for additional_tfs ' +
+              colorama.Fore.BLUE + self.name + colorama.Style.RESET_ALL)
+
+        interactive_marker = self.server.get(self.name)
+        interactive_marker.scale = request.scale
+        self.visible = request.visible  # store visible state
+
+        for control in interactive_marker.controls:
+            if self.visible == 1:
+                if 'move' in control.name:
+                    control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
+                elif 'rotate' in control.name:
+                    control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
+            else:
+                control.interaction_mode = InteractiveMarkerControl.NONE
+
+        self.server.insert(interactive_marker)
+
+        self.server.applyChanges()
+
+        response = atom_msgs.srv.SetAdditionalTfsInteractiveMarkerResponse()
+        response.success = 1
+        response.message = 'Control changed.'
+        return response
+
+    def resetToInitalPose(self, feedback=None):
+        print(
+            'resetToInitalPose called for additional_tfs ' + colorama.Fore.BLUE + self.name +
+            colorama.Style.RESET_ALL)
+        self.optT.matrix = self.optTInitial.matrix
+        print('matrix=\n' + str(self.optT.matrix))
+
+        trans = self.optT.getTranslation()
+        self.marker.pose.position.x = trans[0]
+        self.marker.pose.position.y = trans[1]
+        self.marker.pose.position.z = trans[2]
+        quat = self.optT.getQuaternion()
+        self.marker.pose.orientation.x = quat[0]
+        self.marker.pose.orientation.y = quat[1]
+        self.marker.pose.orientation.z = quat[2]
+        self.marker.pose.orientation.w = quat[3]
+
+        self.updateMarkers()
+        # self.global_menu_handler.reApply(self.server)
+        self.server.applyChanges()
+
+    def publishTFCallback(self, _):
+        trans = self.optT.getTranslation()
+        quat = self.optT.getQuaternion()
+        self.br.sendTransform((trans[0], trans[1], trans[2]),
+                              (quat[0], quat[1], quat[2], quat[3]),
+                              rospy.Time.now(), self.opt_child_link, self.opt_parent_link)
+
+        # if self.modality in ['rgb', 'depth']:
+        #     self.publisher_frustum.publish(self.frustum_marker_array)
+
+    def markerFeedback(self, feedback):
+        # print(' marker ' + self.name + ' received feedback')
+        # print('feedback = ' + str(feedback))
+
+        self.optT.setTranslationFromPosePosition(feedback.pose.position)
+        self.optT.setQuaternionFromPoseQuaternion(feedback.pose.orientation)
+
+        self.updateMarkers()
+
+        # self.global_menu_handler.reApply(self.server)
+        self.server.applyChanges()
+
+    def updateAll(self):
+
+        self.updateOptT()
+        # print('Update all')
+        # print("self.optT")
+        # print(self.optT)
+
+    def updateOptT(self):
+
+        transform_key = generateKey(parent=self.opt_parent_link, child=self.opt_child_link)
+        quat = self.dataset['collections'][
+            self.selection['collection_key']]['transforms'][transform_key]['quat']
+        trans = self.dataset['collections'][
+            self.selection['collection_key']]['transforms'][transform_key]['trans']
+
+        T = TransformationT(self.opt_parent_link, self.opt_child_link)
+        T.setTranslation(trans)
+        T.setQuaternion(quat)
+
+        self.optT = T
+
+    def 
updateMarkers(self): + + # print('updatedMarkers') + + trans = self.optT.getTranslation() + quat = self.optT.getQuaternion() + + marker = self.server.get(self.name) + marker.pose.position.x = trans[0] + marker.pose.position.y = trans[1] + marker.pose.position.z = trans[2] + marker.pose.orientation.x = quat[0] + marker.pose.orientation.y = quat[1] + marker.pose.orientation.z = quat[2] + marker.pose.orientation.w = quat[3] + self.server.insert(marker) + + marker = self.server.get(self.name + "_menu") + marker.pose.position.x = trans[0] + marker.pose.position.y = trans[1] + marker.pose.position.z = trans[2] + marker.pose.orientation.x = quat[0] + marker.pose.orientation.y = quat[1] + marker.pose.orientation.z = quat[2] + marker.pose.orientation.w = quat[3] + self.server.insert(marker) + + self.server.applyChanges() + + # Write new values to the dataset + quat = list(self.optT.getQuaternion()) + trans = list(self.optT.getTranslation()) + + transform_key = generateKey(parent=self.opt_parent_link, child=self.opt_child_link) + self.dataset['collections'][self.selection['collection_key'] + ]['transforms'][transform_key]['quat'] = quat + self.dataset['collections'][self.selection['collection_key'] + ]['transforms'][transform_key]['trans'] = trans + + def createInteractiveMarker(self): + self.marker = InteractiveMarker() + self.marker.header.frame_id = self.opt_parent_link + trans = self.optT.getTranslation() + self.marker.pose.position.x = trans[0] + self.marker.pose.position.y = trans[1] + self.marker.pose.position.z = trans[2] + quat = self.optT.getQuaternion() + self.marker.pose.orientation.x = quat[0] + self.marker.pose.orientation.y = quat[1] + self.marker.pose.orientation.z = quat[2] + self.marker.pose.orientation.w = quat[3] + self.marker.scale = self.marker_scale + + self.marker.name = self.name + self.marker.description = self.name + '_control' + + # insert a box + control = InteractiveMarkerControl() + control.always_visible = True + + marker_box = Marker() + marker_box.type = Marker.SPHERE + marker_box.scale.x = self.marker.scale * 0.3 + marker_box.scale.y = self.marker.scale * 0.3 + marker_box.scale.z = self.marker.scale * 0.3 + marker_box.color.r = 0 + marker_box.color.g = 1 + marker_box.color.b = 0 + marker_box.color.a = 0.2 + + control.markers.append(marker_box) + self.marker.controls.append(control) + + self.marker.controls[0].interaction_mode = InteractiveMarkerControl.MOVE_ROTATE_3D + + # control = InteractiveMarkerControl() + # control.orientation.w = 1 + # control.orientation.x = 1 + # control.orientation.y = 0 + # control.orientation.z = 0 + # control.name = "rotate_x" + # control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS + # control.orientation_mode = InteractiveMarkerControl.FIXED + # self.marker.controls.append(control) + + control = InteractiveMarkerControl() + control.orientation.w = 1 + control.orientation.x = 1 + control.orientation.y = 0 + control.orientation.z = 0 + control.name = "move_x" + control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS + control.orientation_mode = InteractiveMarkerControl.FIXED + self.marker.controls.append(control) + + control = InteractiveMarkerControl() + control.orientation.w = 1 + control.orientation.x = 0 + control.orientation.y = 0 + control.orientation.z = 1 + control.name = "move_y" + control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS + control.orientation_mode = InteractiveMarkerControl.FIXED + self.marker.controls.append(control) + + control = InteractiveMarkerControl() + control.orientation.w = 1 + 
control.orientation.x = 0
+        control.orientation.y = 1
+        control.orientation.z = 0
+        control.name = "move_rotate_z"
+        control.interaction_mode = InteractiveMarkerControl.MOVE_ROTATE
+        control.orientation_mode = InteractiveMarkerControl.FIXED
+        self.marker.controls.append(control)
+
+        control = InteractiveMarkerControl()
+        control.orientation.w = 1
+        control.orientation.x = 0
+        control.orientation.y = 1
+        control.orientation.z = 0
+        control.name = "rotate_z"
+        control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
+        control.orientation_mode = InteractiveMarkerControl.FIXED
+        self.marker.controls.append(control)
+
+        # control = InteractiveMarkerControl()
+        # control.orientation.w = 1
+        # control.orientation.x = 0
+        # control.orientation.y = 1
+        # control.orientation.z = 0
+        # control.name = "move_z"
+        # control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
+        # control.orientation_mode = InteractiveMarkerControl.FIXED
+        # self.marker.controls.append(control)
+
+        # control = InteractiveMarkerControl()
+        # control.orientation.w = 1
+        # control.orientation.x = 0
+        # control.orientation.y = 0
+        # control.orientation.z = 1
+        # control.name = "rotate_y"
+        # control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
+        # control.orientation_mode = InteractiveMarkerControl.FIXED
+        # self.marker.controls.append(control)
+
+        self.server.insert(self.marker, self.markerFeedback)
+
+        # Menu interactive marker
+        int_marker = InteractiveMarker()
+        int_marker.header.frame_id = self.opt_parent_link
+
+        int_marker.pose.position.x = trans[0]
+        int_marker.pose.position.y = trans[1]
+        int_marker.pose.position.z = trans[2]
+
+        int_marker.pose.orientation.x = quat[0]
+        int_marker.pose.orientation.y = quat[1]
+        int_marker.pose.orientation.z = quat[2]
+        int_marker.pose.orientation.w = quat[3]
+        int_marker.scale = self.marker_scale
+
+        int_marker.name = self.name + "_menu"
+        int_marker.description = "Right click for menu"
+
+        # make one control using default visuals
+        control = InteractiveMarkerControl()
+        control.interaction_mode = InteractiveMarkerControl.MENU
+        control.description = "-"
+        control.name = "menu_only_control"
+        int_marker.controls.append(copy.deepcopy(control))
+
+        # make one control showing a sphere
+        marker = Marker()
+        marker.type = Marker.SPHERE
+        marker.scale.x = self.marker_scale * 0.35
+        marker.scale.y = self.marker_scale * 0.35
+        marker.scale.z = self.marker_scale * 0.35
+        marker.color.r = 0
+        marker.color.g = 1
+        marker.color.b = 0
+        marker.color.a = 0.2
+        control.markers.append(marker)
+        control.always_visible = True
+        int_marker.controls.append(control)
+
+        self.server.insert(int_marker, self.resetToInitalPose)
+
+        self.menu_handler.insert(
+            "Reset " + self.name + " to initial configuration", callback=self.resetToInitalPose)
+        self.menu_handler.reApply(self.server)
+        self.menu_handler.apply(self.server, int_marker.name)
diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/interactive_first_guess.py b/atom_calibration/src/atom_calibration/odometry_inspector/interactive_first_guess.py
new file mode 100644
index 00000000..591e0b33
--- /dev/null
+++ b/atom_calibration/src/atom_calibration/odometry_inspector/interactive_first_guess.py
@@ -0,0 +1,171 @@
+
+# stdlib
+import sys
+import argparse
+import time
+
+import numpy as np
+
+# 3rd-party
+import rospkg
+import rospy
+from colorama import Fore, Style
+from matplotlib import cm
+from interactive_markers.interactive_marker_server import InteractiveMarkerServer
+from interactive_markers.menu_handler 
import MenuHandler +from visualization_msgs.msg import InteractiveMarker, InteractiveMarkerControl, Marker +from urdf_parser_py.urdf import URDF +from urdf_parser_py.urdf import Pose as URDFPose + +# local packages +from atom_core.ros_utils import filterLaunchArguments +from atom_core.config_io import loadConfig +from atom_calibration.odometry_inspector.additional_tf import Additional_tf +from tf.listener import TransformListener + + +class InteractiveFirstGuess(object): + + def __init__(self, args, dataset, selection): + + self.args = args # command line arguments + self.dataset = dataset + self.selection = selection + + self.server = None # interactive markers server + self.menu = None + + def init(self): + + # Init interaction + self.server = InteractiveMarkerServer('set_odometry_inspector') + self.menu = MenuHandler() + + self.createInteractiveMarker(self.dataset['calibration_config']['world_link']) + + # sensors and additional tfs to be calibrated + self.menu.insert("Save odometry transformations", + callback=self.onSaveInitialEstimate) + self.menu.insert("Reset to initial values", + callback=self.onResetAll) + + self.menu.reApply(self.server) + + # Generate an interactive marker. + params = {"frame_world": self.dataset['calibration_config']['world_link'], + "frame_opt_parent": self.args['odometry_parent'], + "frame_opt_child": self.args['odometry_child'], + "frame_additional_tf": self.args['odometry_child'], + "additional_tf_color": (0, 0, 1, 1), + "marker_scale": self.args['marker_scale'], + "selection": self.selection, + "dataset": self.dataset} + + # Append to the list of additional_tfs + self.additional_tf = Additional_tf('tf', self.server, self.menu, **params) + print('... done') + + self.server.applyChanges() + + def onSaveInitialEstimate(self, feedback): + print('onSaveInitialEstimate') + return + + if self.checkAdditionalTfs(): + for additional_tfs in self.additional_tfs: + # find corresponding joint for this additional_tfs + for joint in self.urdf.joints: + if additional_tfs.opt_child_link == joint.child and additional_tfs.opt_parent_link == joint.parent: + trans = additional_tfs.optT.getTranslation() + euler = additional_tfs.optT.getEulerAngles() + joint.origin.xyz = list(trans) + joint.origin.rpy = list(euler) + + # Write the urdf file with atom_calibration's + # source path as base directory. 
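+        # NOTE: the code below is currently unreachable because of the early
+        # 'return' above; it is kept for when automatic saving is re-enabled.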
+ rospack = rospkg.RosPack() + # outfile = rospack.get_path('atom_calibration') + os.path.abspath('/' + self.args['filename']) + outfile = self.args['filename'] + with open(outfile, 'w') as out: + print("Writing first guess urdf to '{}'".format(outfile)) + out.write(self.urdf.to_xml_string()) + + def onResetAll(self, feedback): + + return + if self.checkAdditionalTfs(): + for additional_tfs in self.additional_tfs: + additional_tfs.resetToInitalPose() + + def createInteractiveMarker(self, world_link): + marker = InteractiveMarker() + marker.header.frame_id = world_link + trans = (1, 0, 1) + marker.pose.position.x = trans[0] + marker.pose.position.y = trans[1] + marker.pose.position.z = trans[2] + quat = (0, 0, 0, 1) + marker.pose.orientation.x = quat[0] + marker.pose.orientation.y = quat[1] + marker.pose.orientation.z = quat[2] + marker.pose.orientation.w = quat[3] + marker.scale = 0.2 + + marker.name = 'menu' + marker.description = 'menu' + + # insert a box + control = InteractiveMarkerControl() + control.always_visible = True + + marker_box = Marker() + marker_box.type = Marker.SPHERE + marker_box.scale.x = marker.scale * 0.7 + marker_box.scale.y = marker.scale * 0.7 + marker_box.scale.z = marker.scale * 0.7 + marker_box.color.r = 0 + marker_box.color.g = 1 + marker_box.color.b = 0 + marker_box.color.a = 0.2 + + control.markers.append(marker_box) + marker.controls.append(control) + + marker.controls[0].interaction_mode = InteractiveMarkerControl.MOVE_3D + + control = InteractiveMarkerControl() + control.orientation.w = 1 + control.orientation.x = 1 + control.orientation.y = 0 + control.orientation.z = 0 + control.name = "move_x" + control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS + control.orientation_mode = InteractiveMarkerControl.FIXED + marker.controls.append(control) + + control = InteractiveMarkerControl() + control.orientation.w = 1 + control.orientation.x = 0 + control.orientation.y = 1 + control.orientation.z = 0 + control.name = "move_z" + control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS + control.orientation_mode = InteractiveMarkerControl.FIXED + marker.controls.append(control) + + control = InteractiveMarkerControl() + control.orientation.w = 1 + control.orientation.x = 0 + control.orientation.y = 0 + control.orientation.z = 1 + control.name = "move_y" + control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS + control.orientation_mode = InteractiveMarkerControl.FIXED + marker.controls.append(control) + + self.server.insert(marker, self.onSaveInitialEstimate) + + self.menu.apply(self.server, marker.name) + + def checkAdditionalTfs(self): + return self.config['additional_tfs'] is not None diff --git a/atom_calibration/src/atom_calibration/odometry_inspector/transformation_t.py b/atom_calibration/src/atom_calibration/odometry_inspector/transformation_t.py new file mode 100644 index 00000000..5c7da4c8 --- /dev/null +++ b/atom_calibration/src/atom_calibration/odometry_inspector/transformation_t.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python + +# stdlib +import copy + +# 3rd-party +import numpy as np +import cv2 +import tf +from interactive_markers.menu_handler import * +from visualization_msgs.msg import * +from tf.listener import TransformListener + +# ------------------------ +# BASE CLASSES # +# ------------------------ + +class TransformationT(): + """ Base class for geometric transformation + """ + + def __init__(self, parent_frame_id, frame_id): + self.parent_frame_id = parent_frame_id + self.frame_id = frame_id + self.matrix = np.identity(4, 
dtype=float) + + self.stamp = rospy.Time.now() + + def __str__(self): + return 'Transform from ' + str(self.parent_frame_id) + ' to ' + str(self.frame_id) + ' at time ' + str( + self.stamp) + '\n' + str(self.matrix) + + def getTranslation(self, homogeneous=False): + if homogeneous: + return self.matrix[0:4, 3] + else: + return self.matrix[0:3, 3] + + def setTranslation(self, value): + self.matrix[0:3, 3] = value + + def setTranslationFromPosePosition(self, trans): + self.matrix[0, 3] = trans.x + self.matrix[1, 3] = trans.y + self.matrix[2, 3] = trans.z + + def getRotation(self): + return self.matrix[0:3, 0:3] + + def setRotation(self, matrix): + self.matrix[0:3, 0:3] = matrix + + def setQuaternion(self, q): + matrix = copy.deepcopy(tf.transformations.quaternion_matrix(q)) + self.matrix[0:3, 0:3] = matrix[0:3, 0:3] + + def setQuaternionFromPoseQuaternion(self, pose_q): + q = (pose_q.x, pose_q.y, pose_q.z, pose_q.w) + matrix = copy.deepcopy(tf.transformations.quaternion_matrix(q)) + self.matrix[0:3, 0:3] = matrix[0:3, 0:3] + + def getQuaternion(self): + m = copy.deepcopy(self.matrix) + m[0, 3] = 0 + m[1, 3] = 0 + m[2, 3] = 0 + return tf.transformations.quaternion_from_matrix(m) + + def getEulerAngles(self): + return tf.transformations.euler_from_matrix(self.matrix) + + def setRodrigues(self, matrix): + self.setRotation(self.rodriguesToMatrix(matrix)) + + def getRodrigues(self): + return self.matrixToRodrigues(self.getRotation()) + + def matrixToRodrigues(self, matrix): + rods, _ = cv2.Rodrigues(matrix[0:3, 0:3]) + rods = rods.transpose() + rodrigues = rods[0] + return rodrigues + + def rodriguesToMatrix(self, r): + rod = np.array(r, dtype=float) + matrix = cv2.Rodrigues(rod) + return matrix[0] + + +class MarkerPoseC: + def __init__(self, position, orientation, frame_id, child_frame_id): + self.position = position + self.orientation = orientation + self.frame_id = frame_id + self.child_frame_id = child_frame_id + + def __str__(self): + return str(self.position) + "\n" + str(self.orientation) + + __repr__ = __str__
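+
+# Example usage (requires an initialized ROS node, since the constructor calls
+# rospy.Time.now(); frame names here are illustrative):
+#   T = TransformationT('base_link', 'odom')
+#   T.setTranslation([1.0, 0.0, 0.5])
+#   T.setQuaternion([0.0, 0.0, 0.0, 1.0])  # identity rotation
+#   print(T.getEulerAngles())  # -> (0.0, 0.0, 0.0)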