From ed21cd18af18c1d62f1a389036f8f737bde9c248 Mon Sep 17 00:00:00 2001 From: "W. Jacob Wagner" Date: Thu, 2 May 2024 22:18:00 -0500 Subject: [PATCH 1/7] WIP: Adding SensorProcessor class to handle propagation of uncertainty. --- .../SensorProcessors.py | 335 ++++++++++++++++++ 1 file changed, 335 insertions(+) create mode 100644 elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py new file mode 100644 index 00000000..fea2f80a --- /dev/null +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py @@ -0,0 +1,335 @@ +# +# Copyright (c) 2024, W. Jacob Wagner. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import numpy as np +import cupy as cp + + +class SensorProcessor(object): + """ + Class for processing sensor data. + Currenlty restricted to point cloud data. + Mainly used for sensor dependent propagation of uncertainty. + """ + def __init__(self, sensor_ID, C_BS, B_r_BS, Sigma_Theta_BS, Sigma_b_r_BS, noise_model_name, noise_model_params, xp=cp): + """Initialize sensor processor for a specific sensor. + + Args: + sensor_ID (str): Sensor ID. Should be unique for each instance of a sensor + C_BS (cupy._core.core.ndarray): Matrix representing relative orientation of sensor frame in body frame. + (takes points from sensor frame and transforms them to body frame) + B_r_BS (cupy._core.core.ndarray): Position of sensor in body frame. + Sigma_Theta_BS (cupy._core.core.ndarray): Covariance of sensor orientation where orientation is defined + as fixed-axis(extrinsic) xyz(roll, pitch, yaw) Euler angles. + Sigma_b_r_BS (cupy._core.core.ndarray): Covariance of sensor position in base frame. + noise_model_name (str): Noise model of the sensor. Either "SLS" or "LiDAR". + noise_model_params (dict): Parameters for the noise model. + xp (module): Numpy or Cupy module.
+ """ + self.sensor_ID = sensor_ID + assert C_BS.shape == (3, 3), "C_BS should be a 3x3 matrix" + self.C_BS = C_BS + self.B_r_BS = self.make_3x1vec(B_r_BS) + assert Sigma_Theta_BS.shape == (3, 3), "Sigma_Theta_BS should be a 3x3 matrix" + self.Sigma_Theta_BS = Sigma_Theta_BS + assert Sigma_b_r_BS.shape == (3, 3), "Sigma_b_r_BS should be a 3x3 matrix" + self.Sigma_b_r_BS = Sigma_b_r_BS + self.noise_models = {"SLS": self.SLS_noise_model, "LiDAR": self.LiDAR_noise_model} # Structured Light Sensor, LiDAR + assert noise_model_name in self.noise_models.keys(), "noise_model should be chosen from {}".format(self.noise_models.keys()) + self.noise_model_name = noise_model_name + self.noise_model = self.noise_models[self.noise_model_name] + self.noise_model_params = noise_model_params + # Make sure params are compatible with cupy + for key in self.noise_model_params.keys(): + self.noise_model_params[key] = xp.array(self.noise_model_params[key]) + self.xp = xp + + def make_3x1vec(self, vec): + """Make sure vector is a 3x1 vector""" + if vec.shape == (3,): + vec = vec[:, np.newaxis] + assert vec.shape == (3, 1), "Vector should be a 3x1 vector" + return vec + + def SS(self, v): + """Skew symmetric matrix of a vector v""" + C = self.xp.zeros((3, 3)) + C[0, 1] = -v[2] + C[0, 2] = v[1] + C[1, 0] = v[2] + C[1, 2] = -v[0] + C[2, 0] = -v[1] + C[2, 1] = v[0] + return C + # Implementing this way instead of below because of cupy compatibility + # return self.xp.array([[0, -v[2], v[1]], + # [v[2], 0, -v[0]], + # [-v[1], v[0], 0]]) + def ei(self, i): + """Basis vector ei""" + return self.xp.array([1 if j == i else 0 for j in range(3)]) + + def Rx(self, a): + """Rotation matrix around x-axis""" + C = self.xp.eye(3) + C[1:, 1:] = self.xp.array( [[self.xp.cos(a), -self.xp.sin(a)], + [self.xp.sin(a), self.xp.cos(a)]]) + return C + # Implementing this way instead of below because of cupy compatibility + # return self.xp.array([[1, 0, 0], + # [0, self.xp.cos(a), -self.xp.sin(a)], + # [0, self.xp.sin(a), self.xp.cos(a)]]) + + def Ry(self, b): + """Rotation matrix around y-axis""" + C = self.xp.eye(3) + C[0, 0] = self.xp.cos(b) + C[0, 2] = self.xp.sin(b) + C[2, 0] = -self.xp.sin(b) + C[2, 2] = self.xp.cos(b) + return C + # Implementing this way instead of below because of cupy compatibility + # return self.xp.array([[self.xp.cos(b), 0, self.xp.sin(b)], + # [0, 1, 0], + # [-self.xp.sin(b), 0, self.xp.cos(b)]]) + + def Rz(self, c): + """Rotation matrix around z-axis""" + C = self.xp.eye(3) + C[:2, :2] = self.xp.array( [[self.xp.cos(c), -self.xp.sin(c)], + [self.xp.sin(c), self.xp.cos(c)]]) + return C + # Implementing this way instead of below because of cupy compatibility + # return self.xp.array([[self.xp.cos(c), -self.xp.sin(c), 0], + # [self.xp.sin(c), self.xp.cos(c), 0], + # [0, 0, 1]]) + + def diag_array_to_stacks(self, d): + """Converts a 2D array where each row defines a diagonal into a 3D array of diagonal matrices + + Args: + d: A 2D numpy array where each row defines a diagonal for a new matrix + + Returns: + A 3D numpy array where D[:,:,i] is a diagonal matrix with d[i,:] on the diagonal + """ + # TODO: Figure out how to do this without a for loop + return self.xp.stack([self.xp.diag(val) for val in d], axis=0) + + def SLS_noise_model(self, S_r_SP, C_MB): + """ + Structured Light Sensor noise model. + Based on the paper: + "Kinect v2 for mobile robot navigation: Evaluation and modeling" by + P. Fankhauser, M. Bloesch, D. Rodriguez, R. Kaestner, M. Hutter, R. Siegwart, + ICAR 2015. 
+ The axis convenciton assumes is z axis forward, x axis right, and y axis down. + Using the model for sensor noise of the Kinect v2. + sigma_l = a + sigma_z = b - c * z + d * z^2(1 + e * cos(alpha)) + f * z^(3/2) * (theta^2)/(pi/2-theta)^2 + Assume theta and alpha are 0 this reduces to: + sigma_z = b - c * z + 2 * d * z^2 + where z is the distance of the point from the sensor along the z axis of the sensor frame in meters, + theta is the angle of the target with repsect to the z axis of the sensor frame (assumed to be 0), + alpha is the angle of incidence of the light on the target (assumed to be 0), + and a, b, c, d, e, and f are postive parameters that are determined experimentally. + sigma_z and sigma_l are the standard deviations (in meters) of the noise in the z direction and the lateral direction. + """ + sigma_l = self.noise_model_params["a"] + z = S_r_SP[:,2] + N = S_r_SP.shape[0] + sigma_z = self.noise_model_params["b"] - self.noise_model_params["c"] * z + 2 * self.noise_model_params["d"] * z**2 + # Repeat sigma_l for each point + sigma_l = self.xp.repeat(sigma_l, N) + # Sigma_S_r_SP = self.xp.diag(self.xp.array([sigma_l**2, sigma_l**2, sigma_z**2])) + Sigma_S_r_SP = self.diag_array_to_stacks(self.xp.array([sigma_l**2, sigma_l**2, sigma_z**2]).T) + J_S_r_SP = C_MB @ self.C_BS + + return J_S_r_SP, Sigma_S_r_SP + + def LiDAR_noise_model(self, points, C_MB): + raise NotImplementedError("LiDAR noise model not implemented yet") + + # TODO: May need to make this its own kernel... + def get_ext_euler_angles(self, C): + """ + Extract Extrinsically defined Euler angles from rotation matrix + The transformation matrix C in terms of + extrinsically defined Euler angles (roll, pitch, yaw) + is defined as C = Rz(yaw) @ Ry(pitch) @ Rx(roll) + i.e. first roll about x, then pitch about y, and then yaw about z (in that order) + i.e. the rotation matrix is defined as a sequence of + rotations about the axes of the original coordinate + system where the axes are fixed in space. + For reference Basic rotation matrices are defined as: + Rx = lambda a: xp.array([[1, 0, 0], + [0, xp.cos(a), xp.sin(a)], + [0, -xp.sin(a), xp.cos(a)]]) + + Ry = lambda b: xp.array([[xp.cos(b), 0, -xp.sin(b)], + [0, 1, 0], + [xp.sin(b), 0, xp.cos(b)]]) + + Rz = lambda c: xp.array([[xp.cos(c), xp.sin(c), 0], + [-xp.sin(c), xp.cos(c), 0], + [0, 0, 1]]) + Note that this may be refered to as Extrinsic Tait-Bryan angles in x-y-z order. + Using Reference: Computing Euler Angles from a Rotation Matrix by Gregory G. Slabaugh + https://eecs.qmul.ac.uk/~gslabaugh/publications/euler.pdf + + 2 solutions for these Extrinsic Tait-Bryan angles always exist. + The Eigen geometry module function eulerAngles(ax1,ax2,ax3) returns the intrinsically + defined Euler angles (a,b,c) applied in the order ax1, ax2, ax3. and ensures that the angles + (a,b,c) are in the ranges [0:pi]x[-pi:pi]x[-pi:pi]. + (https://eigen.tuxfamily.org/dox/group__Geometry__Module.html#title20) + In order to obtain Extrinsic Tait-Bryan angles applied in the order x-y-z, robot_localization + uses the Eigen function y,p,r = eulerAngles(2,1,0) + (https://github.com/cra-ros-pkg/robot_localization/blob/49aab740c0b66c9f266141522e86d64dc86c8939/src/ros_robot_localization_listener.cpp#L456) + which means that the orientation state being tracked by robot_localization + (r,p,y) are in [-pi:pi]x[-pi:pi]x[0:pi]. 
+ Given this assumption we can select the correct set of angles + """ + if self.xp.abs(C[2, 0]) != 1.0: + pitch1 = -self.xp.arcsin(C[2,0]) # Different compared to paper due to + pitch2 = self.xp.pi - pitch1 + roll1 = self.xp.arctan2(C[2,1]/self.xp.cos(pitch1), C[2,2]/self.xp.cos(pitch1)) + roll2 = self.xp.arctan2(C[2,1]/self.xp.cos(pitch2), C[2,2]/self.xp.cos(pitch2)) + yaw1 = self.xp.arctan2(C[1,0]/self.xp.cos(pitch1), C[0,0]/self.xp.cos(pitch1)) + yaw2 = self.xp.arctan2(C[1,0]/self.xp.cos(pitch2), C[0,0]/self.xp.cos(pitch2)) + # Select the correct set of angles + a1_valid = roll1 >= -self.xp.pi and roll1 <= self.xp.pi + a1_valid = a1_valid and (pitch1 >= -self.xp.pi and pitch1 <= self.xp.pi) + a1_valid = a1_valid and (yaw1 >= 0 and yaw1 <= self.xp.pi) + a2_valid = roll2 >= -self.xp.pi and roll2 <= self.xp.pi + a2_valid = a2_valid and (pitch2 >= -self.xp.pi and pitch2 <= self.xp.pi) + a2_valid = a2_valid and (yaw2 >= 0 and yaw2 <= self.xp.pi) + if a1_valid: + roll, pitch, yaw = roll1, pitch1, yaw1 + elif a2_valid: + roll, pitch, yaw = roll2, pitch2, yaw2 + else: + raise ValueError("No valid set of Euler angles found") + + else: # Gimbal lock: pitch is at -90 or 90 degrees + yaw = 0.0 # This can be any value, but we choose 0.0 for consistency + if C[2, 0] == -1.0: + pitch = self.xp.pi/2 + roll = yaw + self.xp.arctan2(C[0,1], C[0,2]) + else: + pitch = -self.xp.pi/2 + roll = -yaw + self.xp.arctan2(-C[0,1], -C[0,2]) + return roll, pitch, yaw + + + def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB): + """ + Calculate variance in z direction of map frame for each point. + A sensor observes a point in the sensor frame. + The point is then transformed to the map frame via: + M_r_MP = C_MB @ (C_BS @ S_r_SP + B_r_BS) + B_r_MB + and projected into the z direction of the map frame via: + z = H @ M_r_MP, H = [0, 0, 1] + Uncertianty in the transform from the map frame to the base frame, + the transform from the base frame to the sensor frame (mounting uncertainty), + and the uncertainty of the measurement in the sensor frame are propagated + through this equation to determine the uncertainty in the z direction of the map frame. + This is done by computing the Jacobian of the transformation with respect to each of the + variables and then using the covariance matrices to determine the variance in z. + + Args: + points (cupy._core.core.ndarray): Points in sensor frame. AKA S_r_SP + C_MB (cupy._core.core.ndarray): Matrix representing relative orientation of body frame in map frame. + B_r_MB (cupy._core.core.ndarray): Position of body frame in map frame. + Sigma_Theta_MB (cupy._core.core.ndarray): Covariance of base orientation in map frame where orientation is defined + as fixed-axis(extrinsic) xyz(roll, pitch, yaw) Euler angles. + Sigma_b_r_MB (cupy._core.core.ndarray): Covariance of base position in map frame. + + Returns: + cupy._core.core.ndarray: Variance of each point along z direction of map frame. + """ + # C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB + assert C_MB.shape == (3, 3), "C_MB should be a 3x3 matrix" + B_r_MB = self.make_3x1vec(B_r_MB) + assert Sigma_Theta_MB.shape == (3, 3), "Sigma_Theta_MB should be a 3x3 matrix" + assert Sigma_b_r_MB.shape == (3, 3), "Sigma_b_r_MB should be a 3x3 matrix" + S_r_SP = points # For ease of numpy/cupy notation points are Nx3 i.e. 
batch index first + assert S_r_SP.shape[1] == 3, "Points should be a Nx3 matrix" + # J_M_r_MB = I # Jacobian of M_r_MP wrt M_r_MB + B_r_BP = (self.C_BS @ S_r_SP.T + self.B_r_BS).T # B_r_BP = C_BS @ S_r_SP + B_r_BS + J_Theta_MB = self.Jac_of_rot(C_MB, B_r_BP) # Jacobian of M_r_MP wrt Theta_MB + J_B_r_BS = C_MB # Jacobian of M_r_MP wrt B_r_BS + J_Theta_BS = C_MB @ self.Jac_of_rot(self.C_BS, S_r_SP) # Jacobian of M_r_MP wrt Theta_BS + J_S_r_SP, Sigma_S_r_SP = self.noise_model(S_r_SP, C_MB) # Jacobian of M_r_MP wrt S_r_SP + + # Propagate uncertainty + # Could apply H here, but in the future we may want the full + # covariance matrix for applying a single point observation to multiple cells + Sigma_M_r_MP = J_Theta_MB @ Sigma_Theta_MB @ J_Theta_MB.transpose((0,2,1)) + \ + J_B_r_BS @ Sigma_b_r_MB @ J_B_r_BS.T + \ + J_Theta_BS @ self.Sigma_Theta_BS @ J_Theta_BS.transpose((0,2,1)) + \ + J_S_r_SP @ Sigma_S_r_SP @ J_S_r_SP.T + + # Pull out the z variance + z_var = Sigma_M_r_MP[:, 2, 2] + return z_var + + def Jac_of_rot(self, C, r): + """ + Calculate Jacobian of rotation matrix C applied to vector r + with respect to extrinsic (fixed-axis) Euler angles (roll, pitch, yaw) x-y-z. + The rotation matrix is represented as a 3x3 matrix. + The vector is represented as a 3x1 vector. + The Jacobian is a 3x3 matrix. + Using method from "Differentiation of the Orientation Matrix by Matrix Multipliers" by J. Lucas, 1963 + https://www.asprs.org/wp-content/uploads/pers/1963journal/jul/1963_jul_708-715.pdf + + Args: + C (cupy._core.core.ndarray): 3x3 rotation matrix. + r (cupy._core.core.ndarray): nx3 vector. + + Returns: + cupy._core.core.ndarray: nx3x3 Jacobian matrix. For ith point Row j of dCr[i] include the partial derivative] + of the of [C*r]_j with respect to the roll, pitch, and yaw angles. 
+ """ + # First extract the roll extrinsic Euler angle from C + # Ideally these would be provided as inputs, but we can extract them here + roll, _, _ = self.get_ext_euler_angles(C) + Qj = (self.Rx(roll).T) @ (self.SS(self.ei(1))) @ self.Rx(roll) + dC_dyaw = self.SS(self.ei(2)) @ C + dC_dpitch = C @ Qj + dC_droll = C @ (self.SS(self.ei(0))) + + dCr_droll = (dC_droll @ r.T).T + dCr_dpitch = (dC_dpitch @ r.T).T + dCr_dyaw = (dC_dyaw @ r.T).T + + dCr = self.xp.stack([dCr_droll, dCr_dpitch, dCr_dyaw], axis=-1) + return dCr # nx3x3 Jacobian matrix + + +if __name__ == "__main__": + xp = cp + sensor_ID = "test_sls" + C_BS = xp.eye(3)*1.0e0 + B_r_BS = xp.array([0.0, 0.0, 0.0]) + Sigma_Theta_BS = xp.eye(3)*1.0e-1 + Sigma_b_r_BS = xp.eye(3)*1.0e-3 + noise_model_name = "SLS" + # Taking parameters from equaiton 6 in the paper + # sigma_z = b - c * z + 2 * d * z^2 + SLS_params = {"a": 6.8e-3, "b": 28.0e-3, "c": 38.0e-3, "d": (42e-3+2.0e-3)/2.0} + sp = SensorProcessor(sensor_ID, C_BS, B_r_BS, Sigma_Theta_BS, Sigma_b_r_BS, noise_model_name, SLS_params, xp) + # points = xp.array([[0, 0, 1.0], [0.0, 0.0, 2.0], [0.0, 0.0, 3.0], [1.0, 2.0, 3.0]]) + # Generate a larger number of points randomly in range of -20 to 20 in each dimension + points = xp.random.uniform(-20, 20, (1000, 3)) + C_MB = xp.eye(3)*1.0e0 + B_r_MB = xp.array([10.0, 0, 0]) + Sigma_Theta_MB = xp.eye(3)*1.0e-6 + Sigma_b_r_MB = xp.eye(3)*1.0e-1 + # TODO: Deal with data types + z_var = sp.get_z_variance(points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB) + # print(z_var) + if xp == cp: + from cupyx.profiler import benchmark + print(benchmark(sp.get_z_variance, (points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB), n_repeat=20)) \ No newline at end of file From 2b1ec730ac517c536ad8e74972ff026d8961c38b Mon Sep 17 00:00:00 2001 From: "W. 
Jacob Wagner" Date: Fri, 3 May 2024 15:06:58 -0500 Subject: [PATCH 2/7] Integrating sensor_processor with elevation_mapping Seems somewhat slow and ROS support has not been added yet --- .../config/core/example_setup.yaml | 9 ++ .../SensorProcessors.py | 105 +++++++----- .../elevation_mapping.py | 151 +++++++++++++----- .../kernels/custom_kernels.py | 16 +- .../elevation_mapping_cupy/parameter.py | 47 +++++- 5 files changed, 231 insertions(+), 97 deletions(-) diff --git a/elevation_mapping_cupy/config/core/example_setup.yaml b/elevation_mapping_cupy/config/core/example_setup.yaml index a3f7a2f2..ba076265 100644 --- a/elevation_mapping_cupy/config/core/example_setup.yaml +++ b/elevation_mapping_cupy/config/core/example_setup.yaml @@ -27,6 +27,15 @@ subscribers: front_cam: topic_name: '/camera/depth/points' data_type: pointcloud + noise_model_name: SLS + SLS_noise_model_params: + a: 6.8e-3 + b: 28.0e-3 + c: 38.0e-3 + # d: (42e-3+2.0e-3)/2.0 + d: 22.0e-3 + Sigma_Theta_BS_diag: [1.0e-1, 1.0e-1, 1.0e-1] + Sigma_b_r_BS_diag: [1.0e-3, 1.0e-3, 1.0e-3] color_cam: # for color camera topic_name: '/camera/rgb/image_raw' camera_info_topic_name: '/camera/depth/camera_info' diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py index fea2f80a..d7ae0612 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py @@ -6,55 +6,73 @@ import cupy as cp + +def make_3x1vec(vec): + """Make sure vector is a 3x1 vector""" + if vec.shape == (3,): + vec = vec[:, np.newaxis] + assert vec.shape == (3, 1), "Vector should be a 3x1 vector" + return vec class SensorProcessor(object): """ Class for processing sensor data. Currenlty restricted to point cloud data. Mainly used for sensor dependent propagation of uncertainty. """ - def __init__(self, sensor_ID, C_BS, B_r_BS, Sigma_Theta_BS, Sigma_b_r_BS, noise_model_name, noise_model_params, xp=cp): + def __init__(self, sensor_ID, noise_model_name, noise_model_params, xp=cp, data_type=cp.float32): """Initialize sensor processor for a specific sensor. Args: sensor_ID (str): Sensor ID. Should be unique for each instance of a sensor - C_BS (cupy._core.core.ndarray): Matrix representing relative orientation of sensor frame in body frame. - (takes points from sensor frame and transforms them to body frame) - B_r_BS (cupy._core.core.ndarray): Position of sensor in body frame. - Sigma_Theta_BS (cupy._core.core.ndarray): Covariance of sensor orientation where orientation is defined - as fixed-axis(extrinsic) xyz(roll, pitch, yaw) Euler angles. - Sigma_b_r_BS (cupy._core.core.ndarray): Covariance of sensor position in base frame. noise_model_name (str): Noise model of the sensor. Either "SLS" or "LiDAR". noise_model_params (dict): Parameters for the noise model. xp (module): Numpy or Cupy module. 
""" + self.data_type = data_type self.sensor_ID = sensor_ID - assert C_BS.shape == (3, 3), "C_BS should be a 3x3 matrix" - self.C_BS = C_BS - self.B_r_BS = self.make_3x1vec(B_r_BS) - assert Sigma_Theta_BS.shape == (3, 3), "Sigma_Theta_BS should be a 3x3 matrix" - self.Sigma_Theta_BS = Sigma_Theta_BS - assert Sigma_b_r_BS.shape == (3, 3), "Sigma_b_r_BS should be a 3x3 matrix" - self.Sigma_b_r_BS = Sigma_b_r_BS + # Set default values for C_BS and B_r_BS + self.C_BS = xp.eye(3, dtype=self.data_type) + self.B_r_BS = make_3x1vec(xp.array([0.0, 0.0, 0.0],dtype=self.data_type)) + # Set default values for Sigma_Theta_BS and Sigma_b_r_BS + # Sigma_Theta_BS: Covariance of sensor orientation where orientation is defined + # as fixed-axis(extrinsic) xyz(roll, pitch, yaw) Euler angles. + self.Sigma_Theta_BS = xp.zeros((3, 3), dtype=self.data_type) + # Sigma_b_r_BS: Covariance of sensor position in base frame. + self.Sigma_b_r_BS = xp.zeros((3, 3), dtype=self.data_type) + self.noise_models = {"SLS": self.SLS_noise_model, "LiDAR": self.LiDAR_noise_model} # Structured Light Sensor, LiDAR assert noise_model_name in self.noise_models.keys(), "noise_model should be chosen from {}".format(self.noise_models.keys()) self.noise_model_name = noise_model_name self.noise_model = self.noise_models[self.noise_model_name] - self.noise_model_params = noise_model_params + self.noise_model_params = {} # Make sure params are compatible with cupy - for key in self.noise_model_params.keys(): - self.noise_model_params[key] = xp.array(self.noise_model_params[key]) + for key in noise_model_params.keys(): + if key == "Sigma_Theta_BS_diag": + assert len(noise_model_params[key]) == 3, "Sigma_Theta_BS_diag should be 3 element arrays" + self.noise_model_params['Sigma_Theta_BS'] = xp.diag(cp.asarray(noise_model_params[key], dtype=self.data_type)) + elif key == "Sigma_b_r_BS_diag": + assert len(noise_model_params[key]) == 3, "Sigma_b_r_BS_diag should be 3 element arrays" + self.noise_model_params['Sigma_b_r_BS'] = xp.diag(cp.asarray(noise_model_params[key], dtype=self.data_type)) + else: + self.noise_model_params[key] = xp.array(noise_model_params[key], dtype=self.data_type) self.xp = xp - def make_3x1vec(self, vec): - """Make sure vector is a 3x1 vector""" - if vec.shape == (3,): - vec = vec[:, np.newaxis] - assert vec.shape == (3, 1), "Vector should be a 3x1 vector" - return vec + def set_BS_transform(self, C_BS, B_r_BS): + """Set the transformation from body frame to sensor frame + Enables the transformation of points from sensor frame to body frame using: + B_r_BP = C_BS * S_r_SP + B_r_BS + + Args: + C_BS (cupy._core.core.ndarray): Matrix representing relative orientation of sensor frame in body frame. 
+ (takes points from sensor frame and transforms them to body frame) + B_r_BS (cupy._core.core.ndarray): Position of sensor in body frame.""" + assert C_BS.shape == (3, 3), "C_BS should be a 3x3 matrix" + self.C_BS = C_BS + self.B_r_BS = make_3x1vec(B_r_BS) def SS(self, v): """Skew symmetric matrix of a vector v""" - C = self.xp.zeros((3, 3)) + C = self.xp.zeros((3, 3), dtype=self.data_type) C[0, 1] = -v[2] C[0, 2] = v[1] C[1, 0] = v[2] @@ -68,13 +86,14 @@ def SS(self, v): # [-v[1], v[0], 0]]) def ei(self, i): """Basis vector ei""" - return self.xp.array([1 if j == i else 0 for j in range(3)]) + return self.xp.array([1 if j == i else 0 for j in range(3)], dtype=self.data_type) def Rx(self, a): """Rotation matrix around x-axis""" - C = self.xp.eye(3) + C = self.xp.eye(3, dtype=self.data_type) C[1:, 1:] = self.xp.array( [[self.xp.cos(a), -self.xp.sin(a)], - [self.xp.sin(a), self.xp.cos(a)]]) + [self.xp.sin(a), self.xp.cos(a)]], + dtype=self.data_type) return C # Implementing this way instead of below because of cupy compatibility # return self.xp.array([[1, 0, 0], @@ -83,7 +102,7 @@ def Rx(self, a): def Ry(self, b): """Rotation matrix around y-axis""" - C = self.xp.eye(3) + C = self.xp.eye(3, dtype=self.data_type) C[0, 0] = self.xp.cos(b) C[0, 2] = self.xp.sin(b) C[2, 0] = -self.xp.sin(b) @@ -98,7 +117,8 @@ def Rz(self, c): """Rotation matrix around z-axis""" C = self.xp.eye(3) C[:2, :2] = self.xp.array( [[self.xp.cos(c), -self.xp.sin(c)], - [self.xp.sin(c), self.xp.cos(c)]]) + [self.xp.sin(c), self.xp.cos(c)]], + dtype=self.data_type) return C # Implementing this way instead of below because of cupy compatibility # return self.xp.array([[self.xp.cos(c), -self.xp.sin(c), 0], @@ -115,6 +135,7 @@ def diag_array_to_stacks(self, d): A 3D numpy array where D[:,:,i] is a diagonal matrix with d[i,:] on the diagonal """ # TODO: Figure out how to do this without a for loop + # Can't seem to pass dtype=self.data_type, but should be fine return self.xp.stack([self.xp.diag(val) for val in d], axis=0) def SLS_noise_model(self, S_r_SP, C_MB): @@ -250,7 +271,7 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB): """ # C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB assert C_MB.shape == (3, 3), "C_MB should be a 3x3 matrix" - B_r_MB = self.make_3x1vec(B_r_MB) + B_r_MB = make_3x1vec(B_r_MB) assert Sigma_Theta_MB.shape == (3, 3), "Sigma_Theta_MB should be a 3x3 matrix" assert Sigma_b_r_MB.shape == (3, 3), "Sigma_b_r_MB should be a 3x3 matrix" S_r_SP = points # For ease of numpy/cupy notation points are Nx3 i.e. 
batch index first @@ -262,12 +283,16 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB): J_Theta_BS = C_MB @ self.Jac_of_rot(self.C_BS, S_r_SP) # Jacobian of M_r_MP wrt Theta_BS J_S_r_SP, Sigma_S_r_SP = self.noise_model(S_r_SP, C_MB) # Jacobian of M_r_MP wrt S_r_SP + Sigma_Theta_BS = self.noise_model_params["Sigma_Theta_BS"] + Sigma_b_r_BS = self.noise_model_params["Sigma_b_r_BS"] + # Propagate uncertainty # Could apply H here, but in the future we may want the full # covariance matrix for applying a single point observation to multiple cells - Sigma_M_r_MP = J_Theta_MB @ Sigma_Theta_MB @ J_Theta_MB.transpose((0,2,1)) + \ - J_B_r_BS @ Sigma_b_r_MB @ J_B_r_BS.T + \ - J_Theta_BS @ self.Sigma_Theta_BS @ J_Theta_BS.transpose((0,2,1)) + \ + Sigma_M_r_MP = Sigma_b_r_MB + \ + J_Theta_MB @ Sigma_Theta_MB @ J_Theta_MB.transpose((0,2,1)) + \ + J_B_r_BS @ Sigma_b_r_BS @ J_B_r_BS.T + \ + J_Theta_BS @ Sigma_Theta_BS @ J_Theta_BS.transpose((0,2,1)) + \ J_S_r_SP @ Sigma_S_r_SP @ J_S_r_SP.T # Pull out the z variance @@ -313,13 +338,19 @@ def Jac_of_rot(self, C, r): sensor_ID = "test_sls" C_BS = xp.eye(3)*1.0e0 B_r_BS = xp.array([0.0, 0.0, 0.0]) - Sigma_Theta_BS = xp.eye(3)*1.0e-1 - Sigma_b_r_BS = xp.eye(3)*1.0e-3 + Sigma_Theta_BS_diag = xp.array([1.0e-1, 1.0e-1, 1.0e-1]) + Sigma_b_r_BS_diag = xp.array([1.0e-3, 1.0e-3, 1.0e-3]) noise_model_name = "SLS" # Taking parameters from equaiton 6 in the paper # sigma_z = b - c * z + 2 * d * z^2 - SLS_params = {"a": 6.8e-3, "b": 28.0e-3, "c": 38.0e-3, "d": (42e-3+2.0e-3)/2.0} - sp = SensorProcessor(sensor_ID, C_BS, B_r_BS, Sigma_Theta_BS, Sigma_b_r_BS, noise_model_name, SLS_params, xp) + SLS_params = {"a": 6.8e-3, + "b": 28.0e-3, + "c": 38.0e-3, + "d": (42e-3+2.0e-3)/2.0, + "Sigma_Theta_BS_diag": Sigma_Theta_BS_diag, + "Sigma_b_r_BS_diag": Sigma_b_r_BS_diag} + sp = SensorProcessor(sensor_ID, noise_model_name, SLS_params, xp) + sp.set_BS_transform(C_BS, B_r_BS) # points = xp.array([[0, 0, 1.0], [0.0, 0.0, 2.0], [0.0, 0.0, 3.0], [1.0, 2.0, 3.0]]) # Generate a larger number of points randomly in range of -20 to 20 in each dimension points = xp.random.uniform(-20, 20, (1000, 3)) diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py index 0ff32d9a..7fe97602 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py @@ -28,6 +28,7 @@ from elevation_mapping_cupy.kernels import polygon_mask_kernel from elevation_mapping_cupy.kernels import image_to_map_correspondence_kernel +from elevation_mapping_cupy.sensor_processor import SensorProcessor, make_3x1vec from elevation_mapping_cupy.map_initializer import MapInitializer from elevation_mapping_cupy.plugins.plugin_manager import PluginManager from elevation_mapping_cupy.semantic_map import SemanticMap @@ -58,7 +59,7 @@ def __init__(self, param: Parameter): self.param = param self.data_type = self.param.data_type self.resolution = param.resolution - self.center = xp.array([0, 0, 0], dtype=self.data_type) + self.center = xp.array([[0],[0],[0]], dtype=self.data_type) self.base_rotation = xp.eye(3, dtype=self.data_type) self.map_length = param.map_length self.cell_n = param.cell_n @@ -84,6 +85,9 @@ def __init__(self, param: Parameter): self.elevation_map[1] += self.initial_variance self.elevation_map[3] += 1.0 + # Configure sensors + self.configure_sensors(param) + # overlap clearance 
cell_range = int(self.param.overlap_clear_range_xy / self.resolution) cell_range = np.clip(cell_range, 0, self.cell_n) @@ -115,6 +119,22 @@ def __init__(self, param: Parameter): self.plugin_manager.load_plugin_settings(plugin_config_file) self.map_initializer = MapInitializer(self.initial_variance, param.initialized_variance, xp=cp, method="points") + + def configure_sensors(self, param): + """Configure the sensor Processors for the elevation map. + + Args: + param (elevation_mapping_cupy.parameter.Parameter): + """ + self.sensor_processors = {} + for sensor_ID, config in param.subscriber_cfg.items(): + if config["data_type"] == "pointcloud": + nm_name = config["noise_model_name"] + nm_param_name = nm_name + "_noise_model_params" + nm_params = config.get(nm_param_name, {}) + sp = SensorProcessor(sensor_ID, nm_name, nm_params, xp) + self.sensor_processors[sensor_ID] = sp + def clear(self): """Reset all the layers of the elevation & the semantic map.""" @@ -238,7 +258,6 @@ def compile_kernels(self): self.resolution, self.cell_n, self.cell_n, - self.param.sensor_noise_factor, self.param.mahalanobis_thresh, self.param.outlier_variance, self.param.wall_num_thresh, @@ -257,7 +276,6 @@ def compile_kernels(self): self.resolution, self.cell_n, self.cell_n, - self.param.sensor_noise_factor, self.param.mahalanobis_thresh, self.param.drift_compensation_variance_inlier, self.param.traversability_inlier, @@ -303,39 +321,57 @@ def shift_translation_to_map_center(self, t): Args: t (cupy._core.core.ndarray): Absolute point position + + Returns: + cupy._core.core.ndarray: Translated point position """ - t -= self.center + t_shift = t - self.center + return t_shift - def update_map_with_kernel(self, points_all, channels, R, t, position_noise, orientation_noise): + def update_map_with_kernel(self, sensor_ID, points_all, channels, C_MB, B_r_MB, Sigma_b_r_MB, Sigma_Theta_MB): """Update map with new measurement. Args: + sensor_ID (str): Sensor ID points_all (cupy._core.core.ndarray): - channels (List[str]): - R (cupy._core.core.ndarray): - t (cupy._core.core.ndarray): - position_noise (float): - orientation_noise (float): + channels (List[str]): List of channels in the point cloud besides x, y, z + C_MB (cupy._core.core.ndarray): Rotation matrix from the map frame to the base frame + B_r_MB (cupy._core.core.ndarray): Translation vector from the map frame to the base frame + Sigma_b_r_MB (cupy._core.core.ndarray): Covariance of base position in map frame. + Sigma_Theta_MB (cupy._core.core.ndarray): Covariance of base orientation in map frame where orientation is defined + as fixed-axis(extrinsic) xyz(roll, pitch, yaw) Euler angles. """ self.new_map *= 0.0 error = cp.array([0.0], dtype=cp.float32) error_cnt = cp.array([0], dtype=cp.float32) points = points_all[:, :3] + # Obtain the transform from the map frame to the sensor frame + C_BS = self.sensor_processors[sensor_ID].C_BS + B_r_BS = self.sensor_processors[sensor_ID].B_r_BS + C_MS = C_MB @ C_BS + B_r_MS = C_MB @ B_r_BS + B_r_MB # additional_fusion = self.get_fusion_of_pcl(channels) with self.map_lock: - self.shift_translation_to_map_center(t) + # Now the translation is w.r.t. 
the map center frame O + O_r_OS = self.shift_translation_to_map_center(B_r_MS) + # C_MS is equivalent to C_OS because we assume the map center frame O is aligned with the map frame M + C_OS = C_MS self.error_counting_kernel( self.elevation_map, points, cp.array([0.0], dtype=self.data_type), cp.array([0.0], dtype=self.data_type), - R, - t, + C_OS, + O_r_OS, self.new_map, error, error_cnt, size=(points.shape[0]), ) + # Compute position and orientation noise of the base frame in the map frame for + # triggering drift compensation + position_noise = cp.sqrt(Sigma_b_r_MB[0, 0] + Sigma_b_r_MB[1, 1] + Sigma_b_r_MB[2, 2]) + orientation_noise = cp.sqrt(Sigma_Theta_MB[0, 0] + Sigma_Theta_MB[1, 1] + Sigma_Theta_MB[2, 2]) if ( self.param.enable_drift_compensation and error_cnt > self.param.min_height_drift_cnt @@ -348,11 +384,14 @@ def update_map_with_kernel(self, points_all, channels, R, t, position_noise, ori self.additive_mean_error += self.mean_error if np.abs(self.mean_error) < self.param.max_drift: self.elevation_map[0] += self.mean_error * self.param.drift_compensation_alpha + # Compute the vertical variance for the sensor using uncertainty propagation for the sensor + var_h = self.sensor_processors[sensor_ID].get_z_variance(points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB) self.add_points_kernel( cp.array([0.0], dtype=self.data_type), cp.array([0.0], dtype=self.data_type), - R, - t, + C_OS, + O_r_OS, + var_h, self.normal_map, points, self.elevation_map, @@ -361,10 +400,10 @@ def update_map_with_kernel(self, points_all, channels, R, t, position_noise, ori ) self.average_map_kernel(self.new_map, self.elevation_map, size=(self.cell_n * self.cell_n)) - self.semantic_map.update_layers_pointcloud(points_all, channels, R, t, self.new_map) + self.semantic_map.update_layers_pointcloud(points_all, channels, C_OS, O_r_OS, self.new_map) if self.param.enable_overlap_clearance: - self.clear_overlap_map(t) + self.clear_overlap_map(O_r_OS) # dilation before traversability_filter self.traversability_input *= 0.0 self.dilation_filter_kernel( @@ -426,23 +465,26 @@ def update_upper_bound_with_valid_elevation(self): def input_pointcloud( self, + sensor_ID: str, raw_points: cp._core.core.ndarray, channels: List[str], - R: cp._core.core.ndarray, - t: cp._core.core.ndarray, - position_noise: float, - orientation_noise: float, + C_MB: cp._core.core.ndarray, + B_r_MB: cp._core.core.ndarray, + Sigma_b_r_MB: cp._core.core.ndarray, + Sigma_Theta_MB: cp._core.core.ndarray, ): """Input the point cloud and fuse the new measurements to update the elevation map. Args: - raw_points (cupy._core.core.ndarray): - channels (List[str]): - R (cupy._core.core.ndarray): - t (cupy._core.core.ndarray): - position_noise (float): - orientation_noise (float): - + sensor_ID (str): Sensor ID + raw_points (cupy._core.core.ndarray): Points in the sensor frame (S_r_SP) + channels (List[str]): List of channels in the point cloud including x, y, z as + the first three channels + C_MB (cupy._core.core.ndarray): Rotation matrix from the map frame to the base frame + B_r_MB (cupy._core.core.ndarray): Translation vector from the map frame to the base frame + Sigma_b_r_MB (cupy._core.core.ndarray): Covariance of base position in map frame. + Sigma_Theta_MB (cupy._core.core.ndarray): Covariance of base orientation in map frame where orientation is defined + as fixed-axis(extrinsic) xyz(roll, pitch, yaw) Euler angles. 
Returns: None: """ @@ -450,12 +492,13 @@ def input_pointcloud( additional_channels = channels[3:] raw_points = raw_points[~cp.isnan(raw_points).any(axis=1)] self.update_map_with_kernel( + sensor_ID, raw_points, additional_channels, - cp.asarray(R, dtype=self.data_type), - cp.asarray(t, dtype=self.data_type), - position_noise, - orientation_noise, + cp.asarray(C_MB, dtype=self.data_type), + make_3x1vec(cp.asarray(B_r_MB, dtype=self.data_type)), + cp.asarray(Sigma_b_r_MB, dtype=self.data_type), + cp.asarray(Sigma_Theta_MB, dtype=self.data_type), ) def input_image( @@ -877,15 +920,32 @@ def initialize_map(self, points, method="cubic"): # $ python -m cProfile -o profile.stats elevation_mapping.py # $ snakeviz profile.stats xp.random.seed(123) - R = xp.random.rand(3, 3) - t = xp.random.rand(3) - print(R, t) + # C_MB = xp.random.rand(3, 3) + C_MB = xp.eye(3,3) + # b_r_MB = xp.random.rand(3) + b_r_MB = xp.zeros(3) + b_r_MB[2] += 1.0 + print(C_MB, b_r_MB) + Sigma_b_r_MB = xp.eye(3,3)*0.1 + Sigma_Theta_MB = xp.eye(3,3)*0.1 param = Parameter( use_chainer=False, weight_file="../config/weights.dat", plugin_config_file="../config/plugin_config.yaml", ) - param.additional_layers = ["rgb", "grass", "tree", "people"] - param.fusion_algorithms = ["color", "class_bayesian", "class_bayesian", "class_bayesian"] + # param.additional_layers = ["rgb", "grass", "tree", "people"] + # param.fusion_algorithms = ["color", "class_bayesian", "class_bayesian", "class_bayesian"] + # param.additional_layers = ["rgb"] + param.additional_layers = [] + # param.fusion_algorithms = ["pointcloud_color"] + param.initial_variance = 1000.0 + param.initialized_variance = 1000.0 + param.max_height_range = 100.0 + param.mahalanobis_thresh = 100.0 + param.ramped_height_range_a = 100.0 + # param.pointcloud_channel_fusions = {"rgb": "color"}#, "default": "average"} + param.pointcloud_channel_fusions = {"default": "average"} param.update() + # Should be front_cam + sensor_ID = list(param.subscriber_cfg.keys())[0] elevation = ElevationMap(param) layers = [ "elevation", @@ -896,16 +956,23 @@ def initialize_map(self, points, method="cubic"): "inpaint", "rgb", ] - points = xp.random.rand(100000, len(layers)) + # Points within a 1m.x1m.x1m. 
cube + points = xp.random.rand(10000, 3 + len(param.additional_layers)) + # points[:, 0] = 0.0 + # points[:, 1] = 0.0 + points[:, 2] = -1.0 channels = ["x", "y", "z"] + param.additional_layers print(channels) data = np.zeros((elevation.cell_n - 2, elevation.cell_n - 2), dtype=np.float32) for i in range(50): - elevation.input_pointcloud(points, channels, R, t, 0, 0) - elevation.update_normal(elevation.elevation_map[0]) - pos = np.array([i * 0.01, i * 0.02, i * 0.01]) - elevation.move_to(pos, R) + points[:, 0] += param.resolution*2 + points[:, 1] += param.resolution*2 + points[:, 2] += 0.1 + elevation.input_pointcloud(sensor_ID, points, channels, C_MB, b_r_MB, Sigma_b_r_MB, Sigma_Theta_MB) + # elevation.update_normal(elevation.elevation_map[0]) + # pos = np.array([i * 0.01, i * 0.02, i * 0.01]) + # elevation.move_to(pos, R) for layer in layers: elevation.get_map_with_name_ref(layer, data) print(i) diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/kernels/custom_kernels.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/kernels/custom_kernels.py index d7025270..b0e84a58 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/kernels/custom_kernels.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/kernels/custom_kernels.py @@ -10,7 +10,6 @@ def map_utils( resolution, width, height, - sensor_noise_factor, min_valid_distance, max_height_range, ramped_height_range_a, @@ -51,13 +50,12 @@ def map_utils( const int layer = ${width} * ${height}; return layer * layer_n + idx; } + // TODO: Possibly Replace __device__ float transform_p(float16 x, float16 y, float16 z, float16 r0, float16 r1, float16 r2, float16 t) { return r0 * x + r1 * y + r2 * z + t; } - __device__ float z_noise(float16 z){ - return ${sensor_noise_factor} * z * z; - } + // __device__ float point_sensor_distance(float16 x, float16 y, float16 z, float16 sx, float16 sy, float16 sz) { @@ -112,7 +110,6 @@ def map_utils( resolution=resolution, width=width, height=height, - sensor_noise_factor=sensor_noise_factor, min_valid_distance=min_valid_distance, max_height_range=max_height_range, ramped_height_range_a=ramped_height_range_a, @@ -126,7 +123,6 @@ def add_points_kernel( resolution, width, height, - sensor_noise_factor, mahalanobis_thresh, outlier_variance, wall_num_thresh, @@ -142,13 +138,12 @@ def add_points_kernel( enable_visibility_cleanup=True, ): add_points_kernel = cp.ElementwiseKernel( - in_params="raw U center_x, raw U center_y, raw U R, raw U t, raw U norm_map", + in_params="raw U center_x, raw U center_y, raw U R, raw U t, raw U var_h, raw U norm_map", out_params="raw U p, raw U map, raw T newmap", preamble=map_utils( resolution, width, height, - sensor_noise_factor, min_valid_distance, max_height_range, ramped_height_range_a, @@ -163,7 +158,7 @@ def add_points_kernel( U x = transform_p(rx, ry, rz, R[0], R[1], R[2], t[0]); U y = transform_p(rx, ry, rz, R[3], R[4], R[5], t[1]); U z = transform_p(rx, ry, rz, R[6], R[7], R[8], t[2]); - U v = z_noise(rz); + U v = var_h[i]; // TODO: double check indexing here int idx = get_idx(x, y, center_x[0], center_y[0]); if (is_valid(x, y, z, t[0], t[1], t[2])) { if (is_inside(idx)) { @@ -281,7 +276,6 @@ def error_counting_kernel( resolution, width, height, - sensor_noise_factor, mahalanobis_thresh, outlier_variance, traversability_inlier, @@ -298,7 +292,6 @@ def error_counting_kernel( resolution, width, height, - sensor_noise_factor, min_valid_distance, max_height_range, ramped_height_range_a, @@ -313,7 +306,6 @@ def error_counting_kernel( U x = 
transform_p(rx, ry, rz, R[0], R[1], R[2], t[0]); U y = transform_p(rx, ry, rz, R[3], R[4], R[5], t[1]); U z = transform_p(rx, ry, rz, R[6], R[7], R[8], t[2]); - U v = z_noise(rz); // if (!is_valid(z, t[2])) {return;} if (!is_valid(x, y, z, t[0], t[1], t[2])) {return;} // if ((x - t[0]) * (x - t[0]) + (y - t[1]) * (y - t[1]) + (z - t[2]) * (z - t[2]) < 0.5) {return;} diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py index 5fc3c110..03db470b 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py @@ -8,6 +8,8 @@ from simple_parsing.helpers import Serializable from dataclasses import field from typing import Tuple +import os +import yaml @dataclass @@ -141,6 +143,15 @@ class Parameter(Serializable): "channels": ["rgb", "person"], "topic_name": "/elevation_mapping/pointcloud_semantic", "data_type": "pointcloud", + "noise_model_name": "SLS", + "SLS_noise_model_params": { + "a": 6.8e-3, + "b": 28.0e-3, + "c": 38.0e-3, + "d": 22.0e-3, + "Sigma_Theta_BS_diag": [1.0e-1, 1.0e-1, 1.0e-1], + "Sigma_b_r_BS_diag": [1.0e-3, 1.0e-3, 1.0e-3] + } } } ) # configuration for the subscriber @@ -287,14 +298,38 @@ def update(self): self.cell_n = int(round(self.map_length / self.resolution)) + 2 self.true_cell_n = round(self.map_length / self.resolution) self.true_map_length = self.true_cell_n * self.resolution + + def load_from_yaml(self, filename): + + with open(filename, 'r') as file: + data = yaml.safe_load(file) + + for key, value in data.items(): + if key in self.__dict__: + setattr(self, key, value) + elif key == "subscribers": + key = "subscriber_cfg" + setattr(self, key, value) + else: + print(f"Key {key} not found in parameter class") if __name__ == "__main__": param = Parameter() - print(param) - print(param.resolution) - param.set_value("resolution", 0.1) - print(param.resolution) + # Use the directory of the script to navigate to the core config directory + # Which should be located at: ws_dir/elevation_mapping_cupy/elevation_mapping_cupy/script/elevation_mapping_cupy/parameter.py + emcupy_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + config_dir = os.path.join(emcupy_dir, 'config') + # Navigate down to the core directory for testing + core_dir = os.path.join(config_dir, 'core') + filename = os.path.join(core_dir, "example_setup.yaml") + print("Default parameter 'subscriber_cfg' ", param.subscriber_cfg) + param.load_from_yaml(filename) + print("Loaded parameter 'subscriber_cfg' ", param.subscriber_cfg) + # print(param) + # print(param.resolution) + # param.set_value("resolution", 0.1) + # print(param.resolution) - print("names ", param.get_names()) - print("types ", param.get_types()) + # print("names ", param.get_names()) + # print("types ", param.get_types()) From 187e408703cb5806795f4fb0379dcb2d81fb2d31 Mon Sep 17 00:00:00 2001 From: "W. 
Jacob Wagner" Date: Tue, 7 May 2024 20:39:48 -0500 Subject: [PATCH 3/7] Added sensor models to replicate old behavior and constant variance Fixing datatype issues in SensorProcessor --- .../config/core/core_param.yaml | 1 - .../config/core/example_setup.yaml | 5 ++ .../SensorProcessors.py | 47 +++++++++++++++++-- .../elevation_mapping.py | 39 ++++++++------- 4 files changed, 71 insertions(+), 21 deletions(-) diff --git a/elevation_mapping_cupy/config/core/core_param.yaml b/elevation_mapping_cupy/config/core/core_param.yaml index 17baea09..296a5229 100644 --- a/elevation_mapping_cupy/config/core/core_param.yaml +++ b/elevation_mapping_cupy/config/core/core_param.yaml @@ -1,7 +1,6 @@ #### Basic parameters ######## resolution: 0.04 # resolution in m. map_length: 8.0 # map's size in m. -sensor_noise_factor: 0.05 # point's noise is sensor_noise_factor*z^2 (z is distance from sensor). mahalanobis_thresh: 2.0 # points outside this distance is outlier. outlier_variance: 0.01 # if point is outlier, add this value to the cell. drift_compensation_variance_inler: 0.05 # cells under this value is used for drift compensation. diff --git a/elevation_mapping_cupy/config/core/example_setup.yaml b/elevation_mapping_cupy/config/core/example_setup.yaml index ba076265..5946ab78 100644 --- a/elevation_mapping_cupy/config/core/example_setup.yaml +++ b/elevation_mapping_cupy/config/core/example_setup.yaml @@ -22,12 +22,17 @@ image_channel_fusions: # camera_info_topic_name: '/camera/depth/camera_info' # channel_info_topic_name: '/camera/channel_info' +additional_layers: [] subscribers: front_cam: topic_name: '/camera/depth/points' data_type: pointcloud noise_model_name: SLS + SLS_old_noise_model_params: + sensor_noise_factor: 0.05 # point's noise is sensor_noise_factor*z^2 (z is distance from sensor). + constant_noise_model_params: + constant_variance: 0.01 SLS_noise_model_params: a: 6.8e-3 b: 28.0e-3 diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py index d7ae0612..7fab214b 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py @@ -40,7 +40,10 @@ def __init__(self, sensor_ID, noise_model_name, noise_model_params, xp=cp, data_ # Sigma_b_r_BS: Covariance of sensor position in base frame. self.Sigma_b_r_BS = xp.zeros((3, 3), dtype=self.data_type) - self.noise_models = {"SLS": self.SLS_noise_model, "LiDAR": self.LiDAR_noise_model} # Structured Light Sensor, LiDAR + self.noise_models = {"SLS": self.SLS_noise_model, # Structured Light Sensor Kinect v2 Fankhauser et al. 
2015 + "SLS_old": self.SLS_old_noise_model, # Isotropic variance proportional to square of distance in z direction + "constant": self.constant_noise_model, # Constant variance for all points + "LiDAR": self.LiDAR_noise_model} # LiDAR assert noise_model_name in self.noise_models.keys(), "noise_model should be chosen from {}".format(self.noise_models.keys()) self.noise_model_name = noise_model_name self.noise_model = self.noise_models[self.noise_model_name] @@ -67,8 +70,8 @@ def set_BS_transform(self, C_BS, B_r_BS): (takes points from sensor frame and transforms them to body frame) B_r_BS (cupy._core.core.ndarray): Position of sensor in body frame.""" assert C_BS.shape == (3, 3), "C_BS should be a 3x3 matrix" - self.C_BS = C_BS - self.B_r_BS = make_3x1vec(B_r_BS) + self.C_BS = cp.asarray(C_BS, dtype=self.data_type) + self.B_r_BS = cp.asarray(make_3x1vec(B_r_BS)) def SS(self, v): """Skew symmetric matrix of a vector v""" @@ -169,6 +172,34 @@ def SLS_noise_model(self, S_r_SP, C_MB): return J_S_r_SP, Sigma_S_r_SP + def SLS_old_noise_model(self, S_r_SP, C_MB): + """ + Noise model assumed by em_cupy prior to the addition of the SensorProcessor class. + This method is kept for backwards compatibility. Assumes an isotropic variance + proportional to the square of the distance from the sensor along the z direction (front), + of the camera. + """ + # Error propagation can be ignored for this approach as the variance is isotropic and + # any rotations will not change the shape of the distribution + J_S_r_SP = None + z_noise = self.noise_model_params["sensor_noise_factor"] * self.xp.power(S_r_SP[:, 2], 2, dtype=self.data_type) + N = S_r_SP.shape[0] + Sigma_S_r_SP = self.diag_array_to_stacks(self.xp.array([z_noise, z_noise, z_noise]).T) + return J_S_r_SP, Sigma_S_r_SP + + def constant_noise_model(self, S_r_SP, C_MB): + """ + Assume constant variance for all points + """ + # Error propagation can be ignored for this approach as the variance is isotropic and + # any rotations will not change the shape of the distribution + J_S_r_SP = None + var = self.noise_model_params["constant_variance"] + N = S_r_SP.shape[0] + Sigma = self.xp.diag(self.xp.array([var, var, var], dtype=self.data_type)) + Sigma_S_r_SP = self.xp.repeat(Sigma[np.newaxis, :, :], N, axis=0) + return J_S_r_SP, Sigma_S_r_SP + def LiDAR_noise_model(self, points, C_MB): raise NotImplementedError("LiDAR noise model not implemented yet") @@ -276,6 +307,16 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB): assert Sigma_b_r_MB.shape == (3, 3), "Sigma_b_r_MB should be a 3x3 matrix" S_r_SP = points # For ease of numpy/cupy notation points are Nx3 i.e. 
batch index first assert S_r_SP.shape[1] == 3, "Points should be a Nx3 matrix" + + # If using the old SLS noise model, we can ignore error propagation + if self.noise_model_name == "SLS_old": + J_S_r_SP, Sigma_S_r_SP = self.noise_model(S_r_SP, C_MB) + return Sigma_S_r_SP[:, 2, 2] + elif self.noise_model_name == "constant": + J_S_r_SP, Sigma_S_r_SP = self.noise_model(S_r_SP, C_MB) + return Sigma_S_r_SP[:, 2, 2] + + # Error propagation # J_M_r_MB = I # Jacobian of M_r_MP wrt M_r_MB B_r_BP = (self.C_BS @ S_r_SP.T + self.B_r_BS).T # B_r_BP = C_BS @ S_r_SP + B_r_BS J_Theta_MB = self.Jac_of_rot(C_MB, B_r_BP) # Jacobian of M_r_MP wrt Theta_MB diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py index 7fe97602..655e03c4 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/elevation_mapping.py @@ -919,31 +919,36 @@ def initialize_map(self, points, method="cubic"): # Test script for profiling. # $ python -m cProfile -o profile.stats elevation_mapping.py # $ snakeviz profile.stats + + # Get the directory of the script + # Which should be located at: ws_dir/elevation_mapping_cupy/elevation_mapping_cupy/script/elevation_mapping.py + script_dir = os.path.dirname(os.path.realpath(__file__)) + # Back up two directories to find the base directory of the package + # located here: ws_dir/elevation_mapping_cupy/ + base_dir = os.path.dirname(os.path.dirname(script_dir)) + # Navigate down to the core config directory + # located here: ws_dir/elevation_mapping_cupy/elevation_mapping_cupy/config/core/plugin_config.yaml + core_dir = os.path.join(base_dir, 'config', 'core') + xp.random.seed(123) - # C_MB = xp.random.rand(3, 3) C_MB = xp.eye(3,3) - # b_r_MB = xp.random.rand(3) b_r_MB = xp.zeros(3) b_r_MB[2] += 1.0 print(C_MB, b_r_MB) + Sigma_b_r_MB = xp.eye(3,3)*0.1 Sigma_Theta_MB = xp.eye(3,3)*0.1 param = Parameter( - use_chainer=False, weight_file="../config/weights.dat", plugin_config_file="../config/plugin_config.yaml", + use_chainer=False, weight_file=os.path.join(core_dir, "weights.dat"), plugin_config_file=os.path.join(core_dir, "plugin_config.yaml"), ) - # param.additional_layers = ["rgb", "grass", "tree", "people"] - # param.fusion_algorithms = ["color", "class_bayesian", "class_bayesian", "class_bayesian"] - # param.additional_layers = ["rgb"] - param.additional_layers = [] - # param.fusion_algorithms = ["pointcloud_color"] - param.initial_variance = 1000.0 - param.initialized_variance = 1000.0 - param.max_height_range = 100.0 - param.mahalanobis_thresh = 100.0 - param.ramped_height_range_a = 100.0 - # param.pointcloud_channel_fusions = {"rgb": "color"}#, "default": "average"} - param.pointcloud_channel_fusions = {"default": "average"} + param.load_from_yaml(os.path.join(core_dir, "core_param.yaml")) + param.load_from_yaml(os.path.join(core_dir, "example_setup.yaml")) + # Override weights file location and plugin config file location that are set in the yaml file + # Not removing there for ROS compatability + param.weight_file = os.path.join(core_dir, "weights.dat") + param.plugin_config_file = os.path.join(core_dir, "plugin_config.yaml") param.update() + param.additional_layers = [] # Should be front_cam sensor_ID = list(param.subscriber_cfg.keys())[0] elevation = ElevationMap(param) @@ -958,14 +963,14 @@ def initialize_map(self, points, method="cubic"): ] # Points within a 1m.x1m.x1m. 
cube points = xp.random.rand(10000, 3 + len(param.additional_layers)) - # points[:, 0] = 0.0 - # points[:, 1] = 0.0 + # Set starting elevation to -1.0 meter points[:, 2] = -1.0 channels = ["x", "y", "z"] + param.additional_layers print(channels) data = np.zeros((elevation.cell_n - 2, elevation.cell_n - 2), dtype=np.float32) for i in range(50): + # Move points along xy direction and up to test mapping points[:, 0] += param.resolution*2 points[:, 1] += param.resolution*2 points[:, 2] += 0.1 From 518d0a6fbb044fcd62d151bb3844327a19adeb51 Mon Sep 17 00:00:00 2001 From: "W. Jacob Wagner" Date: Tue, 7 May 2024 20:54:21 -0500 Subject: [PATCH 4/7] renamed SensorProcessor to sensor_processor to match naming convention --- .../{SensorProcessors.py => sensor_processor.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename elevation_mapping_cupy/script/elevation_mapping_cupy/{SensorProcessors.py => sensor_processor.py} (100%) diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py similarity index 100% rename from elevation_mapping_cupy/script/elevation_mapping_cupy/SensorProcessors.py rename to elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py From 3d95dceed33286e16799f4767ccf24e3c463f89e Mon Sep 17 00:00:00 2001 From: "W. Jacob Wagner" Date: Wed, 8 May 2024 13:48:21 -0500 Subject: [PATCH 5/7] Adding nvtx profiling and fixing float64 vs float 32 conversions --- .../sensor_processor.py | 41 ++++++++++++++----- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py index 7fab214b..a2352f4c 100644 --- a/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py +++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py @@ -4,7 +4,7 @@ # import numpy as np import cupy as cp - +from cupyx.profiler import time_range def make_3x1vec(vec): @@ -19,6 +19,7 @@ class SensorProcessor(object): Currenlty restricted to point cloud data. Mainly used for sensor dependent propagation of uncertainty. """ + @time_range('init', color_id=0) def __init__(self, sensor_ID, noise_model_name, noise_model_params, xp=cp, data_type=cp.float32): """Initialize sensor processor for a specific sensor. 
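For context on the time_range decorators introduced in this patch: cupyx.profiler.time_range wraps the annotated calls in NVTX/rocTX ranges, so the markers only show up when the script is run under a GPU profiler (e.g. nsys profile python sensor_processor.py) and add little overhead otherwise. A minimal usage sketch follows; the function and array names below are illustrative and not part of the patch.

import cupy as cp
from cupyx.profiler import time_range

@time_range('propagate_covariance', color_id=1)  # decorator form, as used on the class methods
def propagate_covariance(J, Sigma):
    # Batched J @ Sigma @ J^T over an (N, 3, 3) stack of Jacobians.
    return J @ Sigma @ J.transpose((0, 2, 1))

J = cp.random.rand(1000, 3, 3).astype(cp.float32)
Sigma = cp.eye(3, dtype=cp.float32)[cp.newaxis, :, :]
with time_range('outer_loop', color_id=2):  # context-manager form
    for _ in range(10):
        propagate_covariance(J, Sigma)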
@@ -60,6 +61,7 @@ def __init__(self, sensor_ID, noise_model_name, noise_model_params, xp=cp, data_ self.noise_model_params[key] = xp.array(noise_model_params[key], dtype=self.data_type) self.xp = xp + @time_range('set_BS_transform', color_id=0) def set_BS_transform(self, C_BS, B_r_BS): """Set the transformation from body frame to sensor frame Enables the transformation of points from sensor frame to body frame using: @@ -71,8 +73,9 @@ def set_BS_transform(self, C_BS, B_r_BS): B_r_BS (cupy._core.core.ndarray): Position of sensor in body frame.""" assert C_BS.shape == (3, 3), "C_BS should be a 3x3 matrix" self.C_BS = cp.asarray(C_BS, dtype=self.data_type) - self.B_r_BS = cp.asarray(make_3x1vec(B_r_BS)) + self.B_r_BS = cp.asarray(make_3x1vec(B_r_BS), dtype=self.data_type) + @time_range('SS', color_id=0) def SS(self, v): """Skew symmetric matrix of a vector v""" C = self.xp.zeros((3, 3), dtype=self.data_type) @@ -87,10 +90,13 @@ def SS(self, v): # return self.xp.array([[0, -v[2], v[1]], # [v[2], 0, -v[0]], # [-v[1], v[0], 0]]) + + @time_range('ei', color_id=0) def ei(self, i): """Basis vector ei""" return self.xp.array([1 if j == i else 0 for j in range(3)], dtype=self.data_type) + @time_range('Rx', color_id=0) def Rx(self, a): """Rotation matrix around x-axis""" C = self.xp.eye(3, dtype=self.data_type) @@ -103,6 +109,7 @@ def Rx(self, a): # [0, self.xp.cos(a), -self.xp.sin(a)], # [0, self.xp.sin(a), self.xp.cos(a)]]) + @time_range('Ry', color_id=0) def Ry(self, b): """Rotation matrix around y-axis""" C = self.xp.eye(3, dtype=self.data_type) @@ -116,6 +123,7 @@ def Ry(self, b): # [0, 1, 0], # [-self.xp.sin(b), 0, self.xp.cos(b)]]) + @time_range('Rz', color_id=0) def Rz(self, c): """Rotation matrix around z-axis""" C = self.xp.eye(3) @@ -128,6 +136,7 @@ def Rz(self, c): # [self.xp.sin(c), self.xp.cos(c), 0], # [0, 0, 1]]) + @time_range('diag_array_to_stacks', color_id=0) def diag_array_to_stacks(self, d): """Converts a 2D array where each row defines a diagonal into a 3D array of diagonal matrices @@ -141,6 +150,7 @@ def diag_array_to_stacks(self, d): # Can't seem to pass dtype=self.data_type, but should be fine return self.xp.stack([self.xp.diag(val) for val in d], axis=0) + @time_range('SLS_noise_model', color_id=0) def SLS_noise_model(self, S_r_SP, C_MB): """ Structured Light Sensor noise model. @@ -172,6 +182,7 @@ def SLS_noise_model(self, S_r_SP, C_MB): return J_S_r_SP, Sigma_S_r_SP + @time_range('SLS_old_noise_model', color_id=0) def SLS_old_noise_model(self, S_r_SP, C_MB): """ Noise model assumed by em_cupy prior to the addition of the SensorProcessor class. @@ -187,6 +198,7 @@ def SLS_old_noise_model(self, S_r_SP, C_MB): Sigma_S_r_SP = self.diag_array_to_stacks(self.xp.array([z_noise, z_noise, z_noise]).T) return J_S_r_SP, Sigma_S_r_SP + @time_range('constant_noise_model', color_id=0) def constant_noise_model(self, S_r_SP, C_MB): """ Assume constant variance for all points @@ -200,10 +212,12 @@ def constant_noise_model(self, S_r_SP, C_MB): Sigma_S_r_SP = self.xp.repeat(Sigma[np.newaxis, :, :], N, axis=0) return J_S_r_SP, Sigma_S_r_SP + @time_range('LiDAR_noise_model', color_id=0) def LiDAR_noise_model(self, points, C_MB): raise NotImplementedError("LiDAR noise model not implemented yet") # TODO: May need to make this its own kernel... 
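# For intuition, a minimal self-contained check of the angle convention used by
# get_ext_euler_angles below (illustrative only, plain NumPy; the names here are
# not part of this class): compose C = Rz(yaw) @ Ry(pitch) @ Rx(roll) from known
# extrinsic x-y-z angles, then recover them with the non-gimbal-lock branch.
#
#   import numpy as np
#   roll, pitch, yaw = 0.3, -0.2, 0.9  # test angles; yaw chosen in [0, pi]
#   Rx = np.array([[1, 0, 0], [0, np.cos(roll), -np.sin(roll)], [0, np.sin(roll), np.cos(roll)]])
#   Ry = np.array([[np.cos(pitch), 0, np.sin(pitch)], [0, 1, 0], [-np.sin(pitch), 0, np.cos(pitch)]])
#   Rz = np.array([[np.cos(yaw), -np.sin(yaw), 0], [np.sin(yaw), np.cos(yaw), 0], [0, 0, 1]])
#   C = Rz @ Ry @ Rx
#   pitch_rec = -np.arcsin(C[2, 0])          # C[2, 0] = -sin(pitch)
#   roll_rec = np.arctan2(C[2, 1], C[2, 2])  # cos(pitch) > 0 here, so it cancels
#   yaw_rec = np.arctan2(C[1, 0], C[0, 0])
#   assert np.allclose([roll_rec, pitch_rec, yaw_rec], [roll, pitch, yaw])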
+    @time_range('get_ext_euler_angles', color_id=0)
     def get_ext_euler_angles(self, C):
         """
         Extract Extrinsically defined Euler angles from rotation matrix
@@ -273,7 +287,7 @@ def get_ext_euler_angles(self, C):
             roll = -yaw + self.xp.arctan2(-C[0,1], -C[0,2])
         return roll, pitch, yaw
-
+    @time_range('get_z_variance', color_id=0)
     def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB):
         """
         Calculate variance in z direction of map frame for each point.
@@ -302,11 +316,14 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB):
         """
         # C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB
         assert C_MB.shape == (3, 3), "C_MB should be a 3x3 matrix"
-        B_r_MB = make_3x1vec(B_r_MB)
+        C_MB = cp.asarray(C_MB, dtype=self.data_type)
+        B_r_MB = cp.asarray(make_3x1vec(B_r_MB), dtype=self.data_type)
         assert Sigma_Theta_MB.shape == (3, 3), "Sigma_Theta_MB should be a 3x3 matrix"
+        Sigma_Theta_MB = cp.asarray(Sigma_Theta_MB, dtype=self.data_type)
         assert Sigma_b_r_MB.shape == (3, 3), "Sigma_b_r_MB should be a 3x3 matrix"
-        S_r_SP = points  # For ease of numpy/cupy notation points are Nx3 i.e. batch index first
-        assert S_r_SP.shape[1] == 3, "Points should be a Nx3 matrix"
+        Sigma_b_r_MB = cp.asarray(Sigma_b_r_MB, dtype=self.data_type)
+        assert points.shape[1] == 3, "Points should be a Nx3 matrix"
+        S_r_SP = cp.asarray(points, dtype=self.data_type)  # For ease of numpy/cupy notation points are Nx3 i.e. batch index first

         # If using the old SLS noise model, we can ignore error propagation
         if self.noise_model_name == "SLS_old":
@@ -340,6 +357,7 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB):
         z_var = Sigma_M_r_MP[:, 2, 2]
         return z_var

+    @time_range('Jac_of_rot', color_id=0)
     def Jac_of_rot(self, C, r):
         """
         Calculate Jacobian of rotation matrix C applied to vector r
@@ -375,6 +393,7 @@
 if __name__ == "__main__":
+    print("Starting")
     xp = cp
     sensor_ID = "test_sls"
     C_BS = xp.eye(3)*1.0e0
@@ -400,8 +419,10 @@ def Jac_of_rot(self, C, r):
     Sigma_Theta_MB = xp.eye(3)*1.0e-6
     Sigma_b_r_MB = xp.eye(3)*1.0e-1
     # TODO: Deal with data types
-    z_var = sp.get_z_variance(points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB)
+    for i in range(10):
+        z_var = sp.get_z_variance(points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB)
     # print(z_var)
-    if xp == cp:
-        from cupyx.profiler import benchmark
-        print(benchmark(sp.get_z_variance, (points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB), n_repeat=20))
\ No newline at end of file
+    # if xp == cp:
+    #     from cupyx.profiler import benchmark
+    #     print(benchmark(sp.get_z_variance, (points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB), n_repeat=20))
+    print("Done!")
\ No newline at end of file

From 83e6cf890fce24835c1b2c3067832a487b231874 Mon Sep 17 00:00:00 2001
From: "W. Jacob Wagner"
Date: Wed, 8 May 2024 15:21:01 -0500
Subject: [PATCH 6/7] Tweaked computation of sensor noise propagation to speed up calling noise_model

---
 .../sensor_processor.py | 79 ++++++++++++-------
 1 file changed, 51 insertions(+), 28 deletions(-)

diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py
index a2352f4c..456c124f 100644
--- a/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py
+++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py
@@ -175,12 +175,23 @@ def SLS_noise_model(self, S_r_SP, C_MB):
         N = S_r_SP.shape[0]
         sigma_z = self.noise_model_params["b"] - self.noise_model_params["c"] * z + 2 * self.noise_model_params["d"] * z**2
         # Repeat sigma_l for each point
-        sigma_l = self.xp.repeat(sigma_l, N)
-        # Sigma_S_r_SP = self.xp.diag(self.xp.array([sigma_l**2, sigma_l**2, sigma_z**2]))
-        Sigma_S_r_SP = self.diag_array_to_stacks(self.xp.array([sigma_l**2, sigma_l**2, sigma_z**2]).T)
+        # If we wanted to return the full covariance matrix we would need to repeat sigma_l for each point,
+        # combine with sigma_z, and then diagonalize the matrix.
+        # sigma_l = self.xp.repeat(sigma_l, N)
+        # Sigma_S_r_SP = self.diag_array_to_stacks(self.xp.array([sigma_l**2, sigma_l**2, sigma_z**2]).T)
+        # This is unnecessary computation that can be simplified by instead returning the propagated error in
+        # the map frame J_S_r_SP @ Sigma_S_r_SP @ J_S_r_SP.T
         J_S_r_SP = C_MB @ self.C_BS
+        # Break the computation into the lateral and z components
+        # sensor_Sigma_M_r_MP_l = J_S_r_SP @ Sigma_S_r_SP_l @ J_S_r_SP.T
+        # simplified since sigma_l is isotropic, Sigma_xx = Sigma_yy = sigma_l^2, and rest are 0
+        sensor_Sigma_M_r_MP_l = (sigma_l**2) * (J_S_r_SP[:, 0:2] @ J_S_r_SP[:, 0:2].T)[self.xp.newaxis, :, :]
+        # sensor_Sigma_M_r_MP_z = J_S_r_SP @ Sigma_S_r_SP_z @ J_S_r_SP.T
+        # Also simplified because only Sigma_zz component is non-zero
+        sensor_Sigma_M_r_MP_z = (sigma_z**2)[:, self.xp.newaxis, self.xp.newaxis] * (J_S_r_SP[:, 2:3] @ J_S_r_SP[:, 2:3].T)[self.xp.newaxis, :, :]
+        sensor_Sigma_M_r_MP = sensor_Sigma_M_r_MP_l + sensor_Sigma_M_r_MP_z

-        return J_S_r_SP, Sigma_S_r_SP
+        return sensor_Sigma_M_r_MP

     @time_range('SLS_old_noise_model', color_id=0)
@@ -194,9 +205,11 @@ def SLS_old_noise_model(self, S_r_SP, C_MB):
         # any rotations will not change the shape of the distribution
         J_S_r_SP = None
         z_noise = self.noise_model_params["sensor_noise_factor"] * self.xp.power(S_r_SP[:, 2], 2, dtype=self.data_type)
-        N = S_r_SP.shape[0]
-        Sigma_S_r_SP = self.diag_array_to_stacks(self.xp.array([z_noise, z_noise, z_noise]).T)
-        return J_S_r_SP, Sigma_S_r_SP
+        # N = S_r_SP.shape[0]
+        # Sigma_S_r_SP = self.diag_array_to_stacks(self.xp.array([z_noise, z_noise, z_noise]).T)
+        # TODO: Improve this using sparse diagonals
+        sensor_Sigma_M_r_MP = z_noise[:, self.xp.newaxis, self.xp.newaxis] * self.xp.eye(3, dtype=self.data_type)[self.xp.newaxis, :, :]
+        return sensor_Sigma_M_r_MP

     @time_range('constant_noise_model', color_id=0)
@@ -208,9 +221,11 @@ def constant_noise_model(self, S_r_SP, C_MB):
         J_S_r_SP = None
         var = self.noise_model_params["constant_variance"]
         N = S_r_SP.shape[0]
-        Sigma = self.xp.diag(self.xp.array([var, var, var], dtype=self.data_type))
-        Sigma_S_r_SP = self.xp.repeat(Sigma[np.newaxis, :, :], N, axis=0)
-        return J_S_r_SP, Sigma_S_r_SP
+        # Sigma = self.xp.diag(self.xp.array([var, var, var], dtype=self.data_type))
+        # Sigma_S_r_SP = self.xp.repeat(Sigma[xp.newaxis, :, :], N, axis=0)
+        # TODO: Improve this using sparse diagonals or somehow avoid the repeat
+        sensor_Sigma_M_r_MP = self.xp.repeat(self.xp.diag(var * self.xp.ones(3, dtype=self.data_type))[self.xp.newaxis, :, :], N, axis=0)
+        return sensor_Sigma_M_r_MP

     @time_range('LiDAR_noise_model', color_id=0)
     def LiDAR_noise_model(self, points, C_MB):
@@ -325,21 +340,19 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB):
         assert points.shape[1] == 3, "Points should be a Nx3 matrix"
         S_r_SP = cp.asarray(points, dtype=self.data_type)  # For ease of numpy/cupy notation points are Nx3 i.e. batch index first

-        # If using the old SLS noise model, we can ignore error propagation
-        if self.noise_model_name == "SLS_old":
-            J_S_r_SP, Sigma_S_r_SP = self.noise_model(S_r_SP, C_MB)
-            return Sigma_S_r_SP[:, 2, 2]
-        elif self.noise_model_name == "constant":
-            J_S_r_SP, Sigma_S_r_SP = self.noise_model(S_r_SP, C_MB)
-            return Sigma_S_r_SP[:, 2, 2]
+        # If using the old SLS or constant noise model, we can ignore error propagation
+        if self.noise_model_name in ["SLS_old", "constant"]:
+            sensor_Sigma_M_r_MP = self.noise_model(S_r_SP, C_MB)
+            return sensor_Sigma_M_r_MP[:, 2, 2]

         # Error propagation
+        # TODO: Room for speedups here by looking at how Jac_of_rot is used
         # J_M_r_MB = I # Jacobian of M_r_MP wrt M_r_MB
         B_r_BP = (self.C_BS @ S_r_SP.T + self.B_r_BS).T  # B_r_BP = C_BS @ S_r_SP + B_r_BS
         J_Theta_MB = self.Jac_of_rot(C_MB, B_r_BP)  # Jacobian of M_r_MP wrt Theta_MB
         J_B_r_BS = C_MB  # Jacobian of M_r_MP wrt B_r_BS
         J_Theta_BS = C_MB @ self.Jac_of_rot(self.C_BS, S_r_SP)  # Jacobian of M_r_MP wrt Theta_BS
-        J_S_r_SP, Sigma_S_r_SP = self.noise_model(S_r_SP, C_MB)  # Jacobian of M_r_MP wrt S_r_SP
+        sensor_Sigma_M_r_MP = self.noise_model(S_r_SP, C_MB)  # Sensor noise propagated to map frame

         Sigma_Theta_BS = self.noise_model_params["Sigma_Theta_BS"]
         Sigma_b_r_BS = self.noise_model_params["Sigma_b_r_BS"]
@@ -351,7 +364,7 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB):
                         J_Theta_MB @ Sigma_Theta_MB @ J_Theta_MB.transpose((0,2,1)) + \
                         J_B_r_BS @ Sigma_b_r_BS @ J_B_r_BS.T + \
                         J_Theta_BS @ Sigma_Theta_BS @ J_Theta_BS.transpose((0,2,1)) + \
-                        J_S_r_SP @ Sigma_S_r_SP @ J_S_r_SP.T
+                        sensor_Sigma_M_r_MP

         # Pull out the z variance
         z_var = Sigma_M_r_MP[:, 2, 2]
@@ -409,20 +422,30 @@ def Jac_of_rot(self, C, r):
                   "d": (42e-3+2.0e-3)/2.0,
                   "Sigma_Theta_BS_diag": Sigma_Theta_BS_diag,
                   "Sigma_b_r_BS_diag": Sigma_b_r_BS_diag}
-    sp = SensorProcessor(sensor_ID, noise_model_name, SLS_params, xp)
+    SLS_old_params = {"sensor_noise_factor": 0.05}
+    constant_params = {"constant_variance": 0.01}
+    if noise_model_name == "SLS":
+        sensor_params = SLS_params
+    elif noise_model_name == "SLS_old":
+        sensor_params = SLS_old_params
+    elif noise_model_name == "constant":
+        sensor_params = constant_params
+    sp = SensorProcessor(sensor_ID, noise_model_name, sensor_params, xp)
     sp.set_BS_transform(C_BS, B_r_BS)
     # points = xp.array([[0, 0, 1.0], [0.0, 0.0, 2.0], [0.0, 0.0, 3.0], [1.0, 2.0, 3.0]])
     # Generate a larger number of points randomly in range of -20 to 20 in each dimension
-    points = xp.random.uniform(-20, 20, (1000, 3))
+    points = xp.random.uniform(-20, 20, (10000, 3))
     C_MB = xp.eye(3)*1.0e0
     B_r_MB = xp.array([10.0, 0, 0])
     Sigma_Theta_MB = xp.eye(3)*1.0e-6
     Sigma_b_r_MB = xp.eye(3)*1.0e-1
-    # TODO: Deal with data types
-    for i in range(10):
-        z_var = sp.get_z_variance(points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB)
-    # print(z_var)
-    # if xp == cp:
-    #     from cupyx.profiler import benchmark
-    #     print(benchmark(sp.get_z_variance, (points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB), n_repeat=20))
+
+    # For profiling using Nsight Systems
+    # for i in range(10):
+    #     z_var = sp.get_z_variance(points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB)
+
+    # For profiling directly in the script
+    if xp == cp:
+        from cupyx.profiler import benchmark
+        print(benchmark(sp.get_z_variance, (points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB), n_repeat=20))
     print("Done!")
\ No newline at end of file

From 924dc84aa766da03ca677cbed7a9ac211f8c9d55 Mon Sep 17 00:00:00 2001
From: "W. Jacob Wagner"
Date: Thu, 9 May 2024 11:00:27 -0500
Subject: [PATCH 7/7] Added LiDAR Noise model

Made some speedups in sensor_processor
---
 .../config/core/example_setup.yaml |   8 +-
 .../sensor_processor.py            | 101 +++++++++++++++---
 2 files changed, 91 insertions(+), 18 deletions(-)

diff --git a/elevation_mapping_cupy/config/core/example_setup.yaml b/elevation_mapping_cupy/config/core/example_setup.yaml
index 5946ab78..3f8e0387 100644
--- a/elevation_mapping_cupy/config/core/example_setup.yaml
+++ b/elevation_mapping_cupy/config/core/example_setup.yaml
@@ -28,7 +28,7 @@ subscribers:
   front_cam:
     topic_name: '/camera/depth/points'
     data_type: pointcloud
-    noise_model_name: SLS
+    noise_model_name: LiDAR
    SLS_old_noise_model_params:
      sensor_noise_factor: 0.05 # point's noise is sensor_noise_factor*z^2 (z is distance from sensor).
    constant_noise_model_params:
@@ -41,6 +41,12 @@ subscribers:
      d: 22.0e-3
      Sigma_Theta_BS_diag: [1.0e-1, 1.0e-1, 1.0e-1]
      Sigma_b_r_BS_diag: [1.0e-3, 1.0e-3, 1.0e-3]
+    LiDAR_noise_model_params:
+      a: 0.012
+      b: 6.8
+      c: 0.81
+      Sigma_Theta_BS_diag: [1.0e-1, 1.0e-1, 1.0e-1]
+      Sigma_b_r_BS_diag: [1.0e-3, 1.0e-3, 1.0e-3]
   color_cam: # for color camera
     topic_name: '/camera/rgb/image_raw'
     camera_info_topic_name: '/camera/depth/camera_info'
diff --git a/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py b/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py
index 456c124f..8932f8e1 100644
--- a/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py
+++ b/elevation_mapping_cupy/script/elevation_mapping_cupy/sensor_processor.py
@@ -228,8 +228,63 @@ def constant_noise_model(self, S_r_SP, C_MB):
         return sensor_Sigma_M_r_MP

     @time_range('LiDAR_noise_model', color_id=0)
-    def LiDAR_noise_model(self, points, C_MB):
-        raise NotImplementedError("LiDAR noise model not implemented yet")
+    def LiDAR_noise_model(self, S_r_SP, C_MB):
+        """
+        Noise model for LiDAR sensor based on the paper:
+        Noise characterization of depth sensors for surface inspections, Pomerleau et al., 2012
+        A coordinate frame is defined for each point with the z-axis pointing from the sensor to the point.
+        The XY plane for that point is defined as normal to the z-axis.
+        The noise model is defined as:
+        sigma_d = a (depth noise in meters)
+        sigma_r = (b*depth-c)/1000 (lateral noise in meters)
+        where depth is the distance from the sensor to the point along the z-axis and a, b, and c are constants.
+        Note that the naming convention is consistent with the paper for clarity even though another choice
+        of variable names may be more intuitive.
+ """ + # First define the coordinate frame for each point + # The z-axis is the unit vector from the sensor to the point + depth = self.xp.linalg.norm(S_r_SP, axis=1) + ez = S_r_SP / depth[:, self.xp.newaxis] + # Solve for the x axis by noting that the dot product of the x axis with the z axis is 0 + # and assuming that the ex_z component is 0 + # ex = [ax, ay, az] and ez = [bz, by, bz] + # First handle degenerate case where bx == 0 + ex = self.xp.zeros_like(ez, dtype=self.data_type) + degen_case = ez[:, 0] == 0.0 + # ay = 0 + ex[degen_case, 1] = 0.0 + # ax = 1 if bz > 0 else -1 + # RESUME HERE: Need to check indexing for degen case and double check math + ex[degen_case, 0] = self.xp.where(ez[degen_case, 2] > 0, 1.0, -1.0) + # ay = sqrt((1 + by^2/bx^2))^-1 + ex[~degen_case, 1] = self.xp.sqrt(1.0/(1 + ez[~degen_case, 1]**2 / ez[~degen_case, 0]**2)) + # ax = -by/bx * ay + ex[~degen_case, 0] = -ez[~degen_case, 1] / ez[~degen_case, 0] * ex[~degen_case, 1] + + # The y axis is the cross product of the z and x axes + ey = self.xp.cross(ez, ex) + # Now check that the dot product between the basis vectors is 0 + # assert self.xp.all(self.xp.abs(self.xp.sum(ex * ez, axis=1)) <= 1.0e-6), "ex and ez should be orthogonal" + # assert self.xp.all(self.xp.abs(self.xp.sum(ey * ez, axis=1)) <= 1.0e-6), "ey and ez should be orthogonal" + # assert self.xp.all(self.xp.abs(self.xp.sum(ex * ey, axis=1)) <= 1.0e-6), "ex and ey should be orthogonal" + + # The rotation matrix from the sensor frame to the point frame is the transpose of the rotation matrix + # from the point frame to the sensor frame + # C_PS = self.xp. + C_SP = self.xp.stack([ex, ey, ez], axis=1) + + # Now we can calculate the noise + sigma_d = self.noise_model_params["a"] + sigma_r = (self.noise_model_params["b"] * depth - self.noise_model_params["c"]) / 1000 + + # Propagate the error + J_P_r_SP = C_MB @ self.C_BS @ C_SP + # Break the computation into the tangent (sigma_d) and radial (sigma_r) components + sensor_Sigma_M_r_MP_d = (sigma_d**2) * (J_P_r_SP[:, :, 2:3] @ J_P_r_SP[:, :, 2:3].transpose((0,2,1))) + sensor_Sigma_M_r_MP_r = (sigma_r**2)[:,self.xp.newaxis, self.xp.newaxis] * (J_P_r_SP[:, :, 0:2] @ J_P_r_SP[:, :, 0:2].transpose((0,2,1))) + sensor_Sigma_M_r_MP = sensor_Sigma_M_r_MP_d + sensor_Sigma_M_r_MP_r + + return sensor_Sigma_M_r_MP # TODO: May need to make this its own kernel... 
     @time_range('get_ext_euler_angles', color_id=0)
@@ -273,22 +328,25 @@ def get_ext_euler_angles(self, C):
         """
         if self.xp.abs(C[2, 0]) != 1.0:
             pitch1 = -self.xp.arcsin(C[2,0]) # Different compared to paper due to
-            pitch2 = self.xp.pi - pitch1
             roll1 = self.xp.arctan2(C[2,1]/self.xp.cos(pitch1), C[2,2]/self.xp.cos(pitch1))
-            roll2 = self.xp.arctan2(C[2,1]/self.xp.cos(pitch2), C[2,2]/self.xp.cos(pitch2))
             yaw1 = self.xp.arctan2(C[1,0]/self.xp.cos(pitch1), C[0,0]/self.xp.cos(pitch1))
-            yaw2 = self.xp.arctan2(C[1,0]/self.xp.cos(pitch2), C[0,0]/self.xp.cos(pitch2))
             # Select the correct set of angles
             a1_valid = roll1 >= -self.xp.pi and roll1 <= self.xp.pi
             a1_valid = a1_valid and (pitch1 >= -self.xp.pi and pitch1 <= self.xp.pi)
             a1_valid = a1_valid and (yaw1 >= 0 and yaw1 <= self.xp.pi)
+            if a1_valid:
+                roll, pitch, yaw = roll1, pitch1, yaw1
+                return roll, pitch, yaw
+            # else:
+            pitch2 = self.xp.pi - pitch1
+            roll2 = self.xp.arctan2(C[2,1]/self.xp.cos(pitch2), C[2,2]/self.xp.cos(pitch2))
+            yaw2 = self.xp.arctan2(C[1,0]/self.xp.cos(pitch2), C[0,0]/self.xp.cos(pitch2))
             a2_valid = roll2 >= -self.xp.pi and roll2 <= self.xp.pi
             a2_valid = a2_valid and (pitch2 >= -self.xp.pi and pitch2 <= self.xp.pi)
             a2_valid = a2_valid and (yaw2 >= 0 and yaw2 <= self.xp.pi)
-            if a1_valid:
-                roll, pitch, yaw = roll1, pitch1, yaw1
-            elif a2_valid:
+            if a2_valid:
                 roll, pitch, yaw = roll2, pitch2, yaw2
+                return roll, pitch, yaw
             else:
                 raise ValueError("No valid set of Euler angles found")
@@ -329,14 +387,9 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB):
         Returns:
             cupy._core.core.ndarray: Variance of each point along z direction of map frame.
         """
-        # C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB
+        # Check the inputs relevant to the non-error propagation noise models first
         assert C_MB.shape == (3, 3), "C_MB should be a 3x3 matrix"
         C_MB = cp.asarray(C_MB, dtype=self.data_type)
-        B_r_MB = cp.asarray(make_3x1vec(B_r_MB), dtype=self.data_type)
-        assert Sigma_Theta_MB.shape == (3, 3), "Sigma_Theta_MB should be a 3x3 matrix"
-        Sigma_Theta_MB = cp.asarray(Sigma_Theta_MB, dtype=self.data_type)
-        assert Sigma_b_r_MB.shape == (3, 3), "Sigma_b_r_MB should be a 3x3 matrix"
-        Sigma_b_r_MB = cp.asarray(Sigma_b_r_MB, dtype=self.data_type)
         assert points.shape[1] == 3, "Points should be a Nx3 matrix"
         S_r_SP = cp.asarray(points, dtype=self.data_type)  # For ease of numpy/cupy notation points are Nx3 i.e. batch index first
@@ -345,6 +398,13 @@ def get_z_variance(self, points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB):
             sensor_Sigma_M_r_MP = self.noise_model(S_r_SP, C_MB)
             return sensor_Sigma_M_r_MP[:, 2, 2]

+        # Check the other inputs
+        B_r_MB = cp.asarray(make_3x1vec(B_r_MB), dtype=self.data_type)
+        assert Sigma_Theta_MB.shape == (3, 3), "Sigma_Theta_MB should be a 3x3 matrix"
+        Sigma_Theta_MB = cp.asarray(Sigma_Theta_MB, dtype=self.data_type)
+        assert Sigma_b_r_MB.shape == (3, 3), "Sigma_b_r_MB should be a 3x3 matrix"
+        Sigma_b_r_MB = cp.asarray(Sigma_b_r_MB, dtype=self.data_type)
+
         # Error propagation
         # TODO: Room for speedups here by looking at how Jac_of_rot is used
         # J_M_r_MB = I # Jacobian of M_r_MP wrt M_r_MB
@@ -408,12 +468,12 @@ def Jac_of_rot(self, C, r):
 if __name__ == "__main__":
     print("Starting")
     xp = cp
-    sensor_ID = "test_sls"
+    sensor_ID = "test_sensor"
     C_BS = xp.eye(3)*1.0e0
     B_r_BS = xp.array([0.0, 0.0, 0.0])
     Sigma_Theta_BS_diag = xp.array([1.0e-1, 1.0e-1, 1.0e-1])
     Sigma_b_r_BS_diag = xp.array([1.0e-3, 1.0e-3, 1.0e-3])
-    noise_model_name = "SLS"
+    noise_model_name = "LiDAR"
     # Taking parameters from equaiton 6 in the paper
     # sigma_z = b - c * z + 2 * d * z^2
     SLS_params = {"a": 6.8e-3,
@@ -424,12 +484,19 @@ def Jac_of_rot(self, C, r):
                   "Sigma_b_r_BS_diag": Sigma_b_r_BS_diag}
     SLS_old_params = {"sensor_noise_factor": 0.05}
     constant_params = {"constant_variance": 0.01}
+    LiDAR_params = {"a": 0.012,
+                    "b": 6.8,
+                    "c": 0.81,
+                    "Sigma_Theta_BS_diag": Sigma_Theta_BS_diag,
+                    "Sigma_b_r_BS_diag": Sigma_b_r_BS_diag}
     if noise_model_name == "SLS":
         sensor_params = SLS_params
     elif noise_model_name == "SLS_old":
         sensor_params = SLS_old_params
     elif noise_model_name == "constant":
         sensor_params = constant_params
+    elif noise_model_name == "LiDAR":
+        sensor_params = LiDAR_params
     sp = SensorProcessor(sensor_ID, noise_model_name, sensor_params, xp)
     sp.set_BS_transform(C_BS, B_r_BS)
     # points = xp.array([[0, 0, 1.0], [0.0, 0.0, 2.0], [0.0, 0.0, 3.0], [1.0, 2.0, 3.0]])
     # Generate a larger number of points randomly in range of -20 to 20 in each dimension
@@ -441,7 +508,7 @@ def Jac_of_rot(self, C, r):
     Sigma_b_r_MB = xp.eye(3)*1.0e-1

     # For profiling using Nsight Systems
-    # for i in range(10):
+    # for i in range(20):
     #     z_var = sp.get_z_variance(points, C_MB, B_r_MB, Sigma_Theta_MB, Sigma_b_r_MB)

     # For profiling directly in the script
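# ----------------------------------------------------------------------------------------
# Illustrative aside (not part of the patch): since get_z_variance() only needs
# Sigma_M_r_MP[:, 2, 2], the full 3x3 covariance sum could in principle be avoided by
# propagating only the third row of each Jacobian. This is one possible take on the
# "Room for speedups" TODO above, sketched with NumPy under the assumption that
# J_Theta_MB and J_Theta_BS are (N, 3, 3) stacks exactly as in the code above; the
# function name and signature are made up for the example.
import numpy as np

def z_variance_only(J_Theta_MB, J_Theta_BS, C_MB, Sigma_Theta_MB, Sigma_Theta_BS,
                    Sigma_b_r_MB, Sigma_b_r_BS, sensor_Sigma_M_r_MP_zz):
    j1 = J_Theta_MB[:, 2, :]          # third row of each per-point Jacobian wrt Theta_MB
    j2 = J_Theta_BS[:, 2, :]          # third row of each per-point Jacobian wrt Theta_BS
    c3 = C_MB[2, :]                   # third row of C_MB (J_B_r_BS = C_MB)
    return (Sigma_b_r_MB[2, 2]
            + np.einsum('ni,ij,nj->n', j1, Sigma_Theta_MB, j1)
            + c3 @ Sigma_b_r_BS @ c3
            + np.einsum('ni,ij,nj->n', j2, Sigma_Theta_BS, j2)
            + sensor_Sigma_M_r_MP_zz)
# ----------------------------------------------------------------------------------------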