diff --git a/beluga/include/beluga/algorithm.hpp b/beluga/include/beluga/algorithm.hpp index 773b4ec29..e663896be 100644 --- a/beluga/include/beluga/algorithm.hpp +++ b/beluga/include/beluga/algorithm.hpp @@ -20,6 +20,7 @@ * \brief Includes all beluga algorithms. */ +#include #include #include #include diff --git a/beluga/include/beluga/algorithm/cluster_based_estimator.hpp b/beluga/include/beluga/algorithm/cluster_based_estimator.hpp index 55219d26c..7efc51b75 100644 --- a/beluga/include/beluga/algorithm/cluster_based_estimator.hpp +++ b/beluga/include/beluga/algorithm/cluster_based_estimator.hpp @@ -12,18 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef BELUGA_ESTIMATION_CLUSTER_BASED_ESTIMATOR_HPP -#define BELUGA_ESTIMATION_CLUSTER_BASED_ESTIMATOR_HPP +#ifndef BELUGA_ALGORITHM_CLUSTER_BASED_ESTIMATOR_HPP +#define BELUGA_ALGORITHM_CLUSTER_BASED_ESTIMATOR_HPP // standard library -#include -#include -#include +#include #include #include -#include #include -#include #include #include @@ -34,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -45,181 +42,164 @@ /** * \file - * \brief Implementation of a cluster-base estimator mixin. + * \brief Implementation of a cluster-based estimation algorithm. */ namespace beluga { -/// \brief A struct that holds the data of a single cell in the grid. -struct GridCellData { - double weight{0.0}; ///< average weight of the cell - std::size_t num_particles{0}; ///< number of particles in the cell - Sophus::SE2d representative_pose_in_world; ///< state of a particle that is within the cell - std::optional cluster_id; ///< cluster id of the cell -}; - -/// \brief A map that holds the sparse data about the particles grouped in cells. Used by the clusterization algorithm. -using GridCellDataMap2D = std::unordered_map; - -/// \brief Populate the grid cell data map with the data from the particles and their weights. -/// \tparam GridCellDataMapType Type of the grid cell data map. -/// \tparam Range Type of the states range. -/// \tparam Weights Type of the weights range. -/// \tparam Hashes Type of the hashes range. -/// \param states Range containing the states of the particles. -/// \param weights Range containing the weights of the particles. -/// \param hashes Range containing the hashes of the particles. -/// \return New instance of the grid cell data map populated with the information from the states. -template -[[nodiscard]] auto populate_grid_cell_data_from_particles(Range&& states, Weights&& weights, const Hashes& hashes) { - GridCellDataMapType grid_cell_data; - - // preallocate memory with a very rough estimation of the number of grid_cells we might end up with - grid_cell_data.reserve(states.size() / 5); - - // calculate the accumulated cell weight and save a single representative_pose_in_world for each cell - for (const auto& [state, weight, hash] : ranges::views::zip(states, weights, hashes)) { - auto [it, inserted] = grid_cell_data.try_emplace(hash, GridCellData{}); - it->second.weight += weight; - it->second.num_particles++; - if (inserted) { - it->second.representative_pose_in_world = state; +/// Create a priority queue from a map using a specified projection. +/** + * This function template constructs a priority queue where the elements are + * ordered by a priority value derived from the map's values. The projection + * function is applied to each map value to determine its priority. + * + * \tparam Map The type of the associative container. 
+ * \tparam Proj The type of the projection function. + * \param map The map containing the data to be inserted into the priority queue. + * \param proj The projection function used to compute the priority of each element. + * \return A priority queue where elements are ordered by the priority computed + * from the map's values using the projection function. + */ +template +[[nodiscard]] auto make_priority_queue(const Map& map, Proj&& proj) { + struct KeyWithPriority { + double priority; // priority value used to order the queue (higher value first). + std::size_t key; // hash of the cell in the grid cell data map. + + bool operator<(const KeyWithPriority& other) const { + return priority < other.priority; // sort in decreasing priority order } - } + }; - // normalize the accumulated weight by the number of particles in each cell - // to avoid biasing the clustering algorithm towards cells that randomly end up - // with more particles than others. - for (auto& [hash, entry] : grid_cell_data) { - entry.weight /= static_cast(entry.num_particles); // num_particles is guaranteed to be > 0 - } + const auto make_from_pair = [proj = std::forward(proj)](const auto& pair) { + return KeyWithPriority{std::invoke(proj, pair.second), pair.first}; + }; - return grid_cell_data; -} + const auto range = map | // + ranges::views::transform(make_from_pair) | // + ranges::views::common; -/// \brief Calculate the weight threshold that corresponds to a given percentile of the weights. -/// \tparam GridCellDataMapType Type of the grid cell data map. -/// \param grid_cell_data The grid cell data map. -/// \param threshold The percentile of the weights to calculate the threshold for (range: 0.0 to 1.0) -/// \return Threshold value that corresponds to the given percentile of the weights. -template -[[nodiscard]] auto calculate_percentile_weight_threshold(GridCellDataMapType&& grid_cell_data, double threshold) { - const auto extract_weight_f = [](const auto& grid_cell) { return grid_cell.second.weight; }; - auto weights = grid_cell_data | ranges::views::transform(extract_weight_f) | ranges::to>() | - ranges::actions::sort; - return weights[static_cast(static_cast(weights.size()) * threshold)]; + return std::priority_queue(range.begin(), range.end()); } -/// \brief Cap the weight of each cell in the grid cell data map to a given value. -/// \tparam GridCellDataMapType Type of the grid cell data map. -/// \param grid_cell_data The grid cell data map. -/// \param weight_cap The maximum weight value to be assigned to each cell. -template -void cap_grid_cell_data_weights(GridCellDataMapType&& grid_cell_data, double weight_cap) { - for (auto& [hash, entry] : grid_cell_data) { - const auto capped_weight = std::min(entry.weight, weight_cap); - entry.weight = capped_weight; - } +/// Calculates the threshold value at a specified percentile from a range. +/** + * Find the value that is greater than the given percentage of the numbers in a range. + * + * \tparam Range The type of the input range containing the values. + * \param range The input range of values from which to calculate the percentile threshold. + * \param percentile The percentile (between 0 and 1) to calculate the threshold for. + * \return The value at the specified percentile in the sorted range. 
+ */ +template +[[nodiscard]] auto calculate_percentile_threshold(Range&& range, double percentile) { + auto values = range | ranges::to | ranges::actions::sort; + return values[static_cast(static_cast(values.size()) * percentile)]; } -/// \brief Creates the priority queue used by the clustering information from the grid cell data map. -/// \tparam GridCellDataMapType Type of the grid cell data map. -/// \param grid_cell_data The grid cell data map. -/// \return A priority queue containing the information from the grid cell data map. -template -[[nodiscard]] auto populate_priority_queue(GridCellDataMapType&& grid_cell_data) { - struct PriorityQueueItem { - double priority; // priority value used to order the queue (higher value first). - std::size_t hash; // hash of the cell in the grid cell data map. +struct ParticleClusterizerImpl { + /// A struct that holds the data of a single cell for the clusterization algorithm. + template + struct Cell { + State representative_state; ///< state of a particle that is within the cell + double weight{0.0}; ///< average weight of the cell + std::size_t num_particles{0}; ///< number of particles in the cell + std::optional cluster_id; ///< cluster id of the cell }; - struct PriorityQueueItemCompare { - constexpr bool operator()(const PriorityQueueItem& lhs, const PriorityQueueItem& rhs) const { - return lhs.priority < rhs.priority; // sort in decreasing priority order + /// A map that holds the sparse data about the particles grouped in cells. + template + using Map = std::unordered_map>; + + template + [[nodiscard]] static auto make_cluster_map(States&& states, Weights&& weights, Hashes&& hashes) { + using State = ranges::range_value_t; + Map map; + + // preallocate memory with a very rough estimation of the number of grid_cells we might end up with + map.reserve(states.size() / 5); + + // calculate the accumulated cell weight and save a single representative state for each cell + for (const auto& [state, weight, hash] : ranges::views::zip(states, weights, hashes)) { + auto [it, inserted] = map.try_emplace(hash, Cell{}); + auto& [_, entry] = *it; + entry.weight += weight; + entry.num_particles++; + if (inserted) { + entry.representative_state = state; + } } - }; - const auto cell_data_to_queue_item = [](const auto& grid_cell) { - return PriorityQueueItem{grid_cell.second.weight, grid_cell.first}; - }; + return map; + } - auto queue_container = grid_cell_data | // - ranges::views::transform(cell_data_to_queue_item) | // - ranges::to>(); // - return std::priority_queue, PriorityQueueItemCompare>( - PriorityQueueItemCompare{}, std::move(queue_container)); -} + template + static void normalize_and_cap_weights(Map& map, double percentile) { + auto entries = ranges::views::values(map); -/// \brief Function that runs the clustering algorithm and assigns a cluster id to each cell in the grid cell data map. -/// \tparam GridCellDataMapType Type of the grid cell data map. -/// \tparam Hasher Type of the hash function used to convert states into hashes. -/// \tparam Neighbors Type of the range containing the neighbors of a cell. -/// \param grid_cell_data The grid cell data map. -/// \param spatial_hash_function_ The hash object instance. -/// \param neighbors Range containing the neighbors of a cell. -/// \param weight_cap The maximum weight value to be assigned to each cell. 
-template -void map_cells_to_clusters( - GridCellDataMapType&& grid_cell_data, - Hasher&& spatial_hash_function_, - Neighbors&& neighbors, - double weight_cap) { - auto grid_cell_queue = populate_priority_queue(grid_cell_data); - - std::size_t next_cluster_id = 0; - - while (!grid_cell_queue.empty()) { - const auto grid_cell_hash = grid_cell_queue.top().hash; - grid_cell_queue.pop(); - - // any hash that comes out of the queue is known to exist in the cell data map - auto& grid_cell = grid_cell_data[grid_cell_hash]; - const auto& grid_cell_weight = grid_cell.weight; - const auto& representative_pose_in_world = grid_cell.representative_pose_in_world; - - // if there's no cluster id assigned to the cell, assign it a new one - if (!grid_cell.cluster_id.has_value()) { - grid_cell.cluster_id = next_cluster_id++; + // normalize the accumulated weight by the number of particles in each cell + // to avoid biasing the clustering algorithm towards cells that randomly end up + // with more particles than others. + for (auto& entry : entries) { + assert(entry.num_particles > 0); + entry.weight /= static_cast(entry.num_particles); } - // process the neighbors of the cell; if they don't have a cluster id already assigned - // then assign them one and add them to the queue with an inflated priority - // to ensure they get processed ASAP before moving on to other local peaks. - // Notice that with this algorithm each cell will go through the priority queue at most twice. - - const auto get_neighbor_hash = [&](const auto& neighbor_pose_in_representative) { - return spatial_hash_function_(representative_pose_in_world * neighbor_pose_in_representative); - }; - - const auto filter_invalid_neighbors = [&](const auto& neighbor_hash) { - auto it = grid_cell_data.find(neighbor_hash); - return ( - (it != grid_cell_data.end()) && // is within the map - (!it->second.cluster_id.has_value()) && // AND not yet mapped to a cluster - (it->second.weight <= grid_cell_weight)); // AND has lower weight than the current cell - }; - - auto valid_neighbor_hashes_view = // - neighbors | // - ranges::views::transform(get_neighbor_hash) | // - ranges::views::cache1 | // - ranges::views::filter(filter_invalid_neighbors); // - - for (const auto& neighbor_hash : valid_neighbor_hashes_view) { - auto& neighbor = grid_cell_data[neighbor_hash]; - neighbor.cluster_id = grid_cell.cluster_id; - const auto inflated_priority = - weight_cap + neighbor.weight; // since weights are capped at weight_cap, this gives us a value that is - // guaranteed to be higher than any other weight from a local maximum. - grid_cell_queue.push({inflated_priority, neighbor_hash}); // reintroduce with inflated priority + const double max_weight = + calculate_percentile_threshold(ranges::views::transform(entries, &Cell::weight), percentile); + + for (auto& entry : entries) { + entry.weight = std::min(entry.weight, max_weight); } } -} -/// Parameters used to construct a ClusterBasedEstimator instance. 
-struct ClusterBasedStateEstimatorParam { + template + static void assign_clusters(Map& map, NeighborsFunction&& neighbors_function) { + auto queue = make_priority_queue(map, &Cell::weight); + const auto max_priority = queue.top().priority; + + std::size_t next_cluster_id = 0; + + while (!queue.empty()) { + const auto hash = queue.top().key; + queue.pop(); + + // any hash that comes out of the queue is known to exist in the cell data map + auto& grid_cell = map[hash]; + + // if there's no cluster id assigned to the cell, assign a new one + if (!grid_cell.cluster_id.has_value()) { + grid_cell.cluster_id = next_cluster_id++; + } + + // process the neighbors of the cell; if they don't have a cluster id already assigned + // then assign them one and add them to the queue with an inflated priority + // to ensure they get processed ASAP before moving on to other local peaks. + // Notice that with this algorithm each cell will go through the priority queue at most twice. + + const auto is_valid_neighbor = [&](const auto& neighbor_hash) { + auto it = map.find(neighbor_hash); + return ( + (it != map.end()) && // is within the map + (!it->second.cluster_id.has_value()) && // AND not yet mapped to a cluster + (it->second.weight <= grid_cell.weight)); // AND has lower weight than the current cell + }; + + for (const std::size_t neighbor_hash : neighbors_function(grid_cell.representative_state) | // + ranges::views::cache1 | // + ranges::views::filter(is_valid_neighbor)) { + auto& neighbor = map[neighbor_hash]; + neighbor.cluster_id = grid_cell.cluster_id; + queue.push({max_priority + neighbor.weight, neighbor_hash}); // reintroduce with inflated priority + } + } + } +}; + +/// Parameters used to construct a ParticleClusterizer instance. +struct ParticleClusterizerParam { double spatial_hash_resolution = 0.20; ///< clustering algorithm spatial resolution - double angular_hash_hesolution = 0.524; ///< clustering algorithm angular resolution + double angular_hash_resolution = 0.524; ///< clustering algorithm angular resolution /// Cluster weight cap threshold. /** @@ -234,73 +214,58 @@ struct ClusterBasedStateEstimatorParam { double weight_cap_percentile = 0.90; }; -/// Primary template for a cluster-based estimation algorithm. -/** - * Particles are grouped into clusters around local maxima, and the state mean and covariance - * of the cluster with the highest total weight is returned. - * - * This class implements the EstimationInterface interface - * and satisfies \ref StateEstimatorPage. - */ -class ClusterBasedStateEstimator { +class ParticleClusterizer { public: - using param_type = ClusterBasedStateEstimatorParam; ///< Type of the parameters used to construct the estimator. + explicit ParticleClusterizer(const ParticleClusterizerParam& parameters) : parameters_{parameters} {} - /// Constructs a ClusterBasedStateEstimator instance. - /** - * \param parameters Algorithm parameters. - */ - explicit ClusterBasedStateEstimator(const param_type& parameters) : parameters_{parameters} {} + [[nodiscard]] auto neighbors(const Sophus::SE2d& pose) const { + return adjacent_grid_cells_ | // + ranges::views::transform([&pose](const Sophus::SE2d& neighbor_pose) { return pose * neighbor_pose; }) | + ranges::views::transform(spatial_hash_function_); + } - /// Estimate the weighted mean and covariance of largest (largest aggregated weight) cluster within the particle set. - /** - * \tparam Particles The type of the states range. - * \param particles The particle set. 
- * \return The weighted mean state and covariance of the largest cluster. - */ template - [[nodiscard]] auto estimate(Particles&& particles) const; + [[nodiscard]] auto operator()(Particles&& particles) { + auto states = beluga::views::states(particles); + auto weights = beluga::views::weights(particles); + auto hashes = states | ranges::views::transform(spatial_hash_function_) | ranges::to; + + auto map = ParticleClusterizerImpl::make_cluster_map(states, weights, hashes); + ParticleClusterizerImpl::normalize_and_cap_weights(map, parameters_.weight_cap_percentile); + ParticleClusterizerImpl::assign_clusters(map, [this](const auto& state) { return neighbors(state); }); + + return hashes | // + ranges::views::transform([&map](std::size_t hash) { return map[hash].cluster_id.value(); }) | + ranges::to; + } private: - param_type parameters_; + ParticleClusterizerParam parameters_; - /// \brief spatial hash function used to group particles in cells - const beluga::spatial_hash spatial_hash_function_{ + beluga::spatial_hash spatial_hash_function_{ parameters_.spatial_hash_resolution, // x parameters_.spatial_hash_resolution, // y - parameters_.angular_hash_hesolution // theta + parameters_.angular_hash_resolution // theta }; - /// \brief Adjacent grid cells used by the clustering algorithm. - const std::vector adjacent_grid_cells_ = { + std::array adjacent_grid_cells_ = { Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{+parameters_.spatial_hash_resolution, 0.0}}, Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{-parameters_.spatial_hash_resolution, 0.0}}, Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{0.0, +parameters_.spatial_hash_resolution}}, Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{0.0, -parameters_.spatial_hash_resolution}}, - Sophus::SE2d{Sophus::SO2d{+parameters_.angular_hash_hesolution}, Sophus::Vector2d{0.0, 0.0}}, - Sophus::SE2d{Sophus::SO2d{-parameters_.angular_hash_hesolution}, Sophus::Vector2d{0.0, 0.0}}, + Sophus::SE2d{Sophus::SO2d{+parameters_.angular_hash_resolution}, Sophus::Vector2d{0.0, 0.0}}, + Sophus::SE2d{Sophus::SO2d{-parameters_.angular_hash_resolution}, Sophus::Vector2d{0.0, 0.0}}, }; }; +/// Computes a cluster based estimate from a particle set. +/** + * Particles are grouped into clusters around local maxima, and the state mean and covariance + * of the cluster with the highest total weight is returned. 
+ */ template -auto ClusterBasedStateEstimator::estimate(Particles&& particles) const { - auto hashes = beluga::views::states(particles) | ranges::views::transform(spatial_hash_function_) | - ranges::to>(); - - auto grid_cell_data = populate_grid_cell_data_from_particles( - beluga::views::states(particles), beluga::views::weights(particles), hashes); - - const auto weight_cap = calculate_percentile_weight_threshold(grid_cell_data, parameters_.weight_cap_percentile); - - cap_grid_cell_data_weights(grid_cell_data, weight_cap); - map_cells_to_clusters(grid_cell_data, spatial_hash_function_, adjacent_grid_cells_, weight_cap); - - const auto cluster_from_hash = [&grid_cell_data](const std::size_t hash) { - const auto& grid_cell = grid_cell_data[hash]; - return grid_cell.cluster_id; - }; - - const auto clusters = hashes | ranges::views::transform(cluster_from_hash) | ranges::views::common; +[[nodiscard]] auto cluster_based_estimate(Particles&& particles, ParticleClusterizerParam parameters = {}) { + const auto clusters = ParticleClusterizer{parameters}(particles); auto per_cluster_estimates = estimate_clusters(beluga::views::states(particles), beluga::views::weights(particles), clusters); @@ -310,8 +275,8 @@ auto ClusterBasedStateEstimator::estimate(Particles&& particles) const { return beluga::estimate(beluga::views::states(particles), beluga::views::weights(particles)); } - const auto [_, mean, covariance] = - *ranges::max_element(per_cluster_estimates, std::less{}, [](const auto& t) { return std::get<0>(t); }); + const auto get_weight = [](const auto& t) { return std::get<0>(t); }; + const auto [_, mean, covariance] = *ranges::max_element(per_cluster_estimates, std::less{}, get_weight); return std::make_pair(mean, covariance); } diff --git a/beluga/include/beluga/algorithm/estimation.hpp b/beluga/include/beluga/algorithm/estimation.hpp index c7ae67cd4..6be4c6aa9 100644 --- a/beluga/include/beluga/algorithm/estimation.hpp +++ b/beluga/include/beluga/algorithm/estimation.hpp @@ -16,9 +16,7 @@ #define BELUGA_ALGORITHM_ESTIMATION_HPP // standard library -#include #include -#include // external #include @@ -261,8 +259,8 @@ std::pair, Sophus::Matrix3> estimate(Poses&& poses) /// \return A vector of tuples, containing the weight, mean and covariance of each cluster, in no particular order. 
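// A minimal call sketch for the estimate_clusters declaration that follows.
// The states, weights and cluster ids below are made-up illustration values
// (not part of the diff); any equality-comparable id type works for the clusters range.
#include <cstddef>
#include <vector>

#include <sophus/se2.hpp>

#include <beluga/algorithm/estimation.hpp>

void estimate_clusters_example() {  // hypothetical helper, not part of the diff
  const std::vector<Sophus::SE2d> states = {
      Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{1.0, 1.0}},
      Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{1.1, 1.0}},
      Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{5.0, 5.0}},
      Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{5.1, 5.0}}};
  const std::vector<double> weights = {0.3, 0.3, 0.2, 0.2};
  const std::vector<std::size_t> clusters = {0, 0, 1, 1};  // one cluster id per particle

  // one (weight, mean, covariance) tuple per cluster that has at least two particles
  [[maybe_unused]] const auto per_cluster_estimates = beluga::estimate_clusters(states, weights, clusters);
}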
template [[nodiscard]] auto estimate_clusters(Range&& states, Weights&& weights, Clusters&& clusters) { - static constexpr auto weight = [](const auto& t) { return std::get<1>(t); }; - static constexpr auto cluster = [](const auto& t) { return std::get<2>(t); }; + static constexpr auto get_weight = [](const auto& t) { return std::get<1>(t); }; + static constexpr auto get_cluster = [](const auto& t) { return std::get<2>(t); }; auto particles = ranges::views::zip(states, weights, clusters); @@ -275,7 +273,7 @@ template for (const auto id : cluster_ids) { auto filtered_particles = - particles | ranges::views::cache1 | ranges::views::filter([id](const auto& p) { return cluster(p) == id; }); + particles | ranges::views::cache1 | ranges::views::filter([id](const auto& p) { return get_cluster(p) == id; }); const auto particle_count = ranges::distance(filtered_particles); if (particle_count < 2) { @@ -283,7 +281,7 @@ template continue; } - const auto total_weight = ranges::accumulate(filtered_particles, 0.0, std::plus{}, weight); + const auto total_weight = ranges::accumulate(filtered_particles, 0.0, std::plus{}, get_weight); auto filtered_states = filtered_particles | beluga::views::elements<0>; auto filtered_weights = filtered_particles | beluga::views::elements<1>; const auto [mean, covariance] = estimate(filtered_states, filtered_weights); diff --git a/beluga/test/beluga/algorithm/test_cluster_based_estimator.cpp b/beluga/test/beluga/algorithm/test_cluster_based_estimator.cpp index 9b628d410..c0b565614 100644 --- a/beluga/test/beluga/algorithm/test_cluster_based_estimator.cpp +++ b/beluga/test/beluga/algorithm/test_cluster_based_estimator.cpp @@ -54,19 +54,18 @@ struct ClusterBasedEstimationDetailTesting : public ::testing::Test { [[nodiscard]] auto generate_test_grid_cell_data_map() const { constexpr auto kUpperLimit = 30.0; - GridCellDataMap2D map; + ParticleClusterizerImpl::Map map; for (double x = 0.0; x < kUpperLimit; x += 1.0) { const auto weight = x; const auto state = SE2d{SO2d{0.}, Vector2d{x, x}}; - map.emplace(spatial_hash_function(state), GridCellData{weight, 0, state, std::nullopt}); + map.emplace(spatial_hash_function(state), ParticleClusterizerImpl::Cell{state, weight, 0, std::nullopt}); } return map; } [[nodiscard]] static auto make_particle_multicluster_dataset(double xmin, double xmax, double ymin, double ymax, double step) { - std::vector states; - std::vector weights; + std::vector> particles; const auto xwidth = xmax - xmin; const auto ywidth = ymax - ymin; @@ -85,12 +84,11 @@ struct ClusterBasedEstimationDetailTesting : public ::testing::Test { // add a field of zeros around the peaks to ease predicting the mean and // covariance values of the highest peaks weight = std::max(0.0, weight - k / 2.0); - states.emplace_back(SO2d{0.}, Vector2d{x + xmin, y + ymin}); - weights.emplace_back(weight); + particles.emplace_back(SE2d{SO2d{0.}, Vector2d{x + xmin, y + ymin}}, weight); } } - return std::make_tuple(states, weights); + return particles; } }; @@ -122,11 +120,18 @@ TEST_F(ClusterBasedEstimationDetailTesting, GridCellDataMapGenerationStep) { const auto s10 = SE2d{SO2d{2.0}, Vector2d{0.00, 0.00}}; // bin 2 const auto s20 = SE2d{SO2d{2.0}, Vector2d{2.00, 0.00}}; // bin 3 - const auto states = std::vector{s00, s01, s10, s20}; - const auto weights = std::vector{1.5, 0.5, 1.0, 1.0}; + const auto particles = std::vector>{ + std::make_pair(s00, 1.5), + std::make_pair(s01, 0.5), + std::make_pair(s10, 1.0), + std::make_pair(s20, 1.0), + }; - auto hashes = 
precalculate_particle_hashes(states, spatial_hash_function); - auto test_data = populate_grid_cell_data_from_particles(states, weights, hashes); + auto states = beluga::views::states(particles); + auto weights = beluga::views::weights(particles); + auto hashes = states | ranges::views::transform(spatial_hash_function) | ranges::to; + + auto test_data = ParticleClusterizerImpl::make_cluster_map(states, weights, hashes); const auto hash00 = spatial_hash_function(s00); const auto hash10 = spatial_hash_function(s10); @@ -137,52 +142,26 @@ TEST_F(ClusterBasedEstimationDetailTesting, GridCellDataMapGenerationStep) { ASSERT_NE(test_data.find(hash10), test_data.end()); ASSERT_NE(test_data.find(hash20), test_data.end()); - EXPECT_EQ(test_data[hash00].weight, 1.0); // this one is averaged between both particles + EXPECT_EQ(test_data[hash00].weight, 2.0); EXPECT_EQ(test_data[hash10].weight, 1.0); EXPECT_EQ(test_data[hash20].weight, 1.0); - ASSERT_THAT(test_data[hash00].representative_pose_in_world, SE2Near(s00.so2(), s00.translation(), kTolerance)); - ASSERT_THAT(test_data[hash10].representative_pose_in_world, SE2Near(s10.so2(), s10.translation(), kTolerance)); - ASSERT_THAT(test_data[hash20].representative_pose_in_world, SE2Near(s20.so2(), s20.translation(), kTolerance)); + ASSERT_THAT(test_data[hash00].representative_state, SE2Near(s00.so2(), s00.translation(), kTolerance)); + ASSERT_THAT(test_data[hash10].representative_state, SE2Near(s10.so2(), s10.translation(), kTolerance)); + ASSERT_THAT(test_data[hash20].representative_state, SE2Near(s20.so2(), s20.translation(), kTolerance)); ASSERT_FALSE(test_data[hash00].cluster_id.has_value()); ASSERT_FALSE(test_data[hash10].cluster_id.has_value()); ASSERT_FALSE(test_data[hash20].cluster_id.has_value()); } -TEST_F(ClusterBasedEstimationDetailTesting, WeightThresholdCalculationStep) { - auto map = generate_test_grid_cell_data_map(); - const auto threshold = calculate_percentile_weight_threshold(map, 0.9); - EXPECT_DOUBLE_EQ(threshold, 27.0); -} - -TEST_F(ClusterBasedEstimationDetailTesting, WeightCappingStep) { - // data preparation - auto original_map = generate_test_grid_cell_data_map(); - const auto weight_cap = calculate_percentile_weight_threshold(original_map, 0.9); - - // test proper - auto tested_map = original_map; - cap_grid_cell_data_weights(tested_map, weight_cap); - - for (const auto& [hash, grid_cell] : tested_map) { - const auto original_grid_cell = original_map[hash]; - EXPECT_DOUBLE_EQ(grid_cell.weight, std::min(original_grid_cell.weight, weight_cap)); - } -} - -TEST_F(ClusterBasedEstimationDetailTesting, PriorityQueuePopulationStep) { +TEST_F(ClusterBasedEstimationDetailTesting, MakePriorityQueue) { // data preparation - auto map = generate_test_grid_cell_data_map(); - const auto weight_cap = calculate_percentile_weight_threshold(map, 0.9); - cap_grid_cell_data_weights(map, weight_cap); + auto data = generate_test_grid_cell_data_map(); // test proper - auto prio_queue = populate_priority_queue(map); - EXPECT_EQ(prio_queue.size(), map.size()); - - // the top element necessarily has the highest weight equal to the cap - EXPECT_DOUBLE_EQ(prio_queue.top().priority, weight_cap); + auto prio_queue = make_priority_queue(data, &ParticleClusterizerImpl::Cell::weight); + EXPECT_EQ(prio_queue.size(), data.size()); // from there on the weights should be strictly decreasing auto prev_weight = prio_queue.top().priority; @@ -207,29 +186,34 @@ TEST_F(ClusterBasedEstimationDetailTesting, MapGridCellsToClustersStep) { } } - GridCellDataMap2D map; + 
ParticleClusterizerImpl::Map map; + for (const auto& [x, y, w] : coordinates) { const auto state = SE2d{SO2d{0.}, Vector2d{x, y}}; - map.emplace(spatial_hash_function(state), GridCellData{w, 0, state, std::nullopt}); + map.emplace(spatial_hash_function(state), ParticleClusterizerImpl::Cell{state, w, 0, std::nullopt}); } - const auto weight_cap = calculate_percentile_weight_threshold(map, 0.9); - - cap_grid_cell_data_weights(map, weight_cap); - - const auto adjacent_grid_cells = { - SE2d{SO2d{0.0}, Vector2d{+kSpatialHashResolution, 0.0}}, - SE2d{SO2d{0.0}, Vector2d{-kSpatialHashResolution, 0.0}}, - SE2d{SO2d{0.0}, Vector2d{0.0, +kSpatialHashResolution}}, - SE2d{SO2d{0.0}, Vector2d{0.0, -kSpatialHashResolution}}, + const auto neighbors = [&](const auto& state) { + static const auto kAdjacentGridCells = { + SE2d{SO2d{0.0}, Vector2d{+kSpatialHashResolution, 0.0}}, + SE2d{SO2d{0.0}, Vector2d{-kSpatialHashResolution, 0.0}}, + SE2d{SO2d{0.0}, Vector2d{0.0, +kSpatialHashResolution}}, + SE2d{SO2d{0.0}, Vector2d{0.0, -kSpatialHashResolution}}, + }; + + return kAdjacentGridCells | // + ranges::views::transform([&state](const Sophus::SE2d& neighbor_pose) { return state * neighbor_pose; }) | + ranges::views::transform(spatial_hash_function); }; // test proper // only cells beyond the 10% weight percentile to avoid the messy border // between clusters beneath that threshold - const auto ten_percent_threshold = calculate_percentile_weight_threshold(map, 0.15); - map_cells_to_clusters(map, spatial_hash_function, adjacent_grid_cells, weight_cap); + const auto ten_percent_threshold = calculate_percentile_threshold( + map | ranges::views::values | ranges::views::transform(&ParticleClusterizerImpl::Cell::weight), 0.15); + + ParticleClusterizerImpl::assign_clusters(map, neighbors); auto cells_above_minimum_threshold_view = coordinates | ranges::views::filter([&](const auto& c) { return std::get<2>(c) >= ten_percent_threshold; }); @@ -302,33 +286,17 @@ TEST_F(ClusterBasedEstimationDetailTesting, MapGridCellsToClustersStep) { EXPECT_EQ(full_field_unique_ids.size(), 4); } -TEST_F(ClusterBasedEstimationDetailTesting, ClusterStatEstimationStep) { +TEST_F(ClusterBasedEstimationDetailTesting, ClusterStateEstimationStep) { const double k_field_side = 36.0; // create a map with four independent peaks - auto [states, weights] = make_particle_multicluster_dataset(0.0, k_field_side, 0.0, k_field_side, 1.0); + auto particles = make_particle_multicluster_dataset(0.0, k_field_side, 0.0, k_field_side, 1.0); - const auto adjacent_grid_cells = { - SE2d{SO2d{0.0}, Vector2d{+kSpatialHashResolution, 0.0}}, - SE2d{SO2d{0.0}, Vector2d{-kSpatialHashResolution, 0.0}}, - SE2d{SO2d{0.0}, Vector2d{0.0, +kSpatialHashResolution}}, - SE2d{SO2d{0.0}, Vector2d{0.0, -kSpatialHashResolution}}, - }; - - auto hashes = precalculate_particle_hashes(states, spatial_hash_function); - auto grid_cell_data = populate_grid_cell_data_from_particles(states, weights, hashes); - const auto weight_cap = calculate_percentile_weight_threshold(grid_cell_data, 0.9); - cap_grid_cell_data_weights(grid_cell_data, weight_cap); - map_cells_to_clusters(grid_cell_data, spatial_hash_function, adjacent_grid_cells, weight_cap); - - const auto cluster_from_hash = [&grid_cell_data](const std::size_t hash) { - const auto& grid_cell = grid_cell_data[hash]; - return grid_cell.cluster_id; - }; + const auto clusters = + ParticleClusterizer{ParticleClusterizerParam{kSpatialHashResolution, kAngularHashResolution, 0.9}}(particles); - const auto clusters = hashes | 
ranges::views::transform(cluster_from_hash) | ranges::views::common; - - auto per_cluster_estimates = estimate_clusters(states, weights, clusters); + auto per_cluster_estimates = + estimate_clusters(beluga::views::states(particles), beluga::views::weights(particles), clusters); // check that the number of clusters is correct ASSERT_EQ(per_cluster_estimates.size(), 4); @@ -345,33 +313,23 @@ TEST_F(ClusterBasedEstimationDetailTesting, ClusterStatEstimationStep) { EXPECT_THAT(std::get<1>(per_cluster_estimates[3]), SE2Near(SO2d{0.0}, Vector2d{9.0, 9.0}, kTolerance)); } -template -class MockMixin : public Mixin { - public: - MOCK_METHOD(const std::vector&, states, (), (const)); - MOCK_METHOD(const std::vector&, weights, (), (const)); -}; - TEST_F(ClusterBasedEstimationDetailTesting, HeaviestClusterSelectionTest) { - const auto [states, weights] = make_particle_multicluster_dataset(-2.0, +2.0, -2.0, +2.0, 0.025); - - const auto uut = beluga::ClusterBasedStateEstimator{beluga::ClusterBasedStateEstimatorParam{}}; + const auto particles = make_particle_multicluster_dataset(-2.0, +2.0, -2.0, +2.0, 0.025); // determine the expected values of the mean and covariance of the highest // weight cluster const auto max_peak_filter = [](const auto& s) { return s.translation().x() >= 0.0 && s.translation().y() >= 0.0; }; const auto mask_filter = [](const auto& sample) { return std::get<1>(sample); }; - auto max_peak_mask = states | ranges::views::transform(max_peak_filter); - auto max_peak_masked_states = - ranges::views::zip(states, max_peak_mask) | ranges::views::filter(mask_filter) | beluga::views::elements<0>; - auto max_peak_masked_weights = - ranges::views::zip(weights, max_peak_mask) | ranges::views::filter(mask_filter) | beluga::views::elements<0>; + auto max_peak_mask = beluga::views::states(particles) | ranges::views::transform(max_peak_filter); + auto max_peak_masked_states = ranges::views::zip(beluga::views::states(particles), max_peak_mask) | + ranges::views::filter(mask_filter) | beluga::views::elements<0>; + auto max_peak_masked_weights = ranges::views::zip(beluga::views::weights(particles), max_peak_mask) | + ranges::views::filter(mask_filter) | beluga::views::elements<0>; const auto [expected_pose, expected_covariance] = beluga::estimate(max_peak_masked_states, max_peak_masked_weights); - auto particles = ranges::views::zip(states, weights) | ranges::to(); - const auto [pose, covariance] = uut.estimate(particles); + const auto [pose, covariance] = beluga::cluster_based_estimate(particles); ASSERT_THAT(pose, SE2Near(expected_pose.so2(), expected_pose.translation(), kTolerance)); ASSERT_NEAR(covariance(0, 0), expected_covariance(0, 0), 0.001); @@ -386,8 +344,6 @@ TEST_F(ClusterBasedEstimationDetailTesting, HeaviestClusterSelectionTest) { } TEST_F(ClusterBasedEstimationDetailTesting, NightmareDistributionTest) { - const auto uut = beluga::ClusterBasedStateEstimator{beluga::ClusterBasedStateEstimatorParam{}}; - // particles so far away that they are isolated and will therefore form four separate single // particle clusters const auto states = std::vector{ @@ -402,7 +358,7 @@ TEST_F(ClusterBasedEstimationDetailTesting, NightmareDistributionTest) { const auto [expected_pose, expected_covariance] = beluga::estimate(states, weights); auto particles = ranges::views::zip(states, weights) | ranges::to(); - const auto [pose, covariance] = uut.estimate(particles); + const auto [pose, covariance] = beluga::cluster_based_estimate(particles); ASSERT_THAT(pose, SE2Near(expected_pose.so2(), 
expected_pose.translation(), kTolerance)); ASSERT_NEAR(covariance(0, 0), expected_covariance(0, 0), 0.001);
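// To close the loop on the refactor above: the mixin-style ClusterBasedStateEstimator is
// replaced by the beluga::cluster_based_estimate free function. A minimal usage sketch;
// the particle tuple type, the beluga::Weight strong type and the beluga/primitives.hpp
// include are assumptions here, since the collected particle container type is elided in the diff.
#include <tuple>
#include <vector>

#include <sophus/se2.hpp>

#include <beluga/algorithm/cluster_based_estimator.hpp>
#include <beluga/primitives.hpp>  // assumed location of beluga::Weight

void cluster_based_estimate_example() {  // hypothetical helper, not part of the diff
  std::vector<std::tuple<Sophus::SE2d, beluga::Weight>> particles = {
      {Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{1.0, 1.0}}, beluga::Weight{0.4}},
      {Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{1.1, 1.0}}, beluga::Weight{0.4}},
      {Sophus::SE2d{Sophus::SO2d{0.0}, Sophus::Vector2d{5.0, 5.0}}, beluga::Weight{0.2}},
  };

  // defaults: 0.20 m / 0.524 rad hash resolution, weights capped at the 90th percentile
  [[maybe_unused]] const auto [mean, covariance] = beluga::cluster_based_estimate(particles);

  // parameters can also be passed explicitly
  beluga::ParticleClusterizerParam params;
  params.spatial_hash_resolution = 0.1;
  [[maybe_unused]] const auto [mean2, covariance2] = beluga::cluster_based_estimate(particles, params);
}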