Address code review comments
Signed-off-by: Nahuel Espinosa <[email protected]>
nahueespinosa committed Jul 12, 2024
1 parent 8d62ed1 commit 5124aa3
Showing 2 changed files with 40 additions and 36 deletions.
55 changes: 28 additions & 27 deletions beluga/include/beluga/algorithm/cluster_based_estimation.hpp
@@ -38,7 +38,6 @@
// project
#include <beluga/algorithm/estimation.hpp>
#include <beluga/algorithm/spatial_hash.hpp>
#include <beluga/views.hpp>

#if RANGE_V3_MAJOR == 0 && RANGE_V3_MINOR < 12
#include <range/v3/view/group_by.hpp>
@@ -53,6 +52,8 @@

namespace beluga {

namespace clusterizer_detail {

/// Create a priority queue from a map using a specified projection.
/**
* This function template constructs a priority queue where the elements are
@@ -106,11 +107,9 @@ template <class Range>
return values[static_cast<std::size_t>(n)];
}

namespace clusterizer_detail {

/// A struct that holds the data of a single cell for the clusterization algorithm.
template <class State>
struct Cell {
struct ClusterCell {
State representative_state; ///< state of a particle that is within the cell
double weight{0.0}; ///< average weight of the cell
std::size_t num_particles{0}; ///< number of particles in the cell
@@ -119,7 +118,7 @@ struct Cell {

/// A map that holds the sparse data about the particles grouped in cells.
template <class State>
using Map = std::unordered_map<std::size_t, Cell<State>>;
using ClusterMap = std::unordered_map<std::size_t, ClusterCell<State>>;

/// Create a cluster map from a range of particles and their corresponding spatial hashes.
/**
@@ -138,14 +137,14 @@ using Map = std::unordered_map<std::size_t, Cell<State>>;
template <class States, class Weights, class Hashes>
[[nodiscard]] static auto make_cluster_map(States&& states, Weights&& weights, Hashes&& hashes) {
using State = ranges::range_value_t<States>;
Map<State> map;
ClusterMap<State> map;

// Preallocate memory with a very rough estimation of the number of cells we might end up with.
map.reserve(states.size() / 5);

// Calculate the accumulated cell weight and save a single representative state for each cell.
for (const auto& [state, weight, hash] : ranges::views::zip(states, weights, hashes)) {
auto [it, inserted] = map.try_emplace(hash, Cell<State>{});
auto [it, inserted] = map.try_emplace(hash, ClusterCell<State>{});
auto& [_, entry] = *it;
entry.weight += weight;
entry.num_particles++;
@@ -172,7 +171,7 @@ template <class States, class Weights, class Hashes>
* \param percentile The percentile threshold for capping the weights.
*/
template <class State>
static void normalize_and_cap_weights(Map<State>& map, double percentile) {
static void normalize_and_cap_weights(ClusterMap<State>& map, double percentile) {
auto entries = ranges::views::values(map);

for (auto& entry : entries) {
Expand All @@ -181,7 +180,7 @@ static void normalize_and_cap_weights(Map<State>& map, double percentile) {
}

const double max_weight =
calculate_percentile_threshold(ranges::views::transform(entries, &Cell<State>::weight), percentile);
calculate_percentile_threshold(ranges::views::transform(entries, &ClusterCell<State>::weight), percentile);

for (auto& entry : entries) {
entry.weight = std::min(entry.weight, max_weight);
@@ -201,8 +200,8 @@ static void normalize_and_cap_weights(Map<State>& map, double percentile) {
* \param neighbors_function A function that returns neighboring cell hashes for a given state.
*/
template <class State, class NeighborsFunction>
static void assign_clusters(Map<State>& map, NeighborsFunction&& neighbors_function) {
auto queue = make_priority_queue(map, &Cell<State>::weight);
static void assign_clusters(ClusterMap<State>& map, NeighborsFunction&& neighbors_function) {
auto queue = make_priority_queue(map, &ClusterCell<State>::weight);
const auto max_priority = queue.top().priority;

std::size_t next_cluster_id = 0;
@@ -284,14 +283,14 @@ class ParticleClusterizer {

/// Clusters the given particles based on their states and weights.
/**
* \tparam Particles The type of the particles range.
* \param particles A range of particles to be clustered.
* \tparam States Range type of the states.
* \tparam Weights Range type of the weights.
* \param states Range containing the states of the particles.
* \param weights Range containing the weights of the particles.
* \return A vector of cluster IDs corresponding to the input particles.
*/
template <class Particles>
[[nodiscard]] auto operator()(Particles&& particles) {
auto states = beluga::views::states(particles);
auto weights = beluga::views::weights(particles);
template <class States, class Weights>
[[nodiscard]] auto operator()(States&& states, Weights&& weights) {
auto hashes = states | ranges::views::transform(spatial_hash_function_) | ranges::to<std::vector>;

auto map = clusterizer_detail::make_cluster_map(states, weights, hashes);
@@ -404,23 +403,25 @@ template <class States, class Weights, class Clusters>
* of the cluster with the highest total weight is returned. If no clusters are found,
* the overall mean and covariance of the particles are calculated and returned.
*
* \tparam Particles The type of the particles range.
* \param particles A range of particles to be clustered and estimated.
* \tparam States Range type of the states.
* \tparam Weights Range type of the weights.
* \param states Range containing the states of the particles.
* \param weights Range containing the weights of the particles.
* \param parameters Parameters for the particle clusterizer (optional).
* \return A pair consisting of the state mean and covariance of the cluster with the highest total weight.
*/
template <class Particles>
[[nodiscard]] auto cluster_based_estimate(Particles&& particles, ParticleClusterizerParam parameters = {}) {
const auto clusters = ParticleClusterizer{parameters}(particles);
template <class States, class Weights>
[[nodiscard]] auto cluster_based_estimate(
States&& states, //
Weights&& weights, //
ParticleClusterizerParam parameters = {}) {
const auto clusters = ParticleClusterizer{parameters}(states, weights);

auto per_cluster_estimates =
estimate_clusters(beluga::views::states(particles), beluga::views::weights(particles), clusters);
auto per_cluster_estimates = estimate_clusters(states, weights, clusters);

if (per_cluster_estimates.empty()) {
// hmmm... maybe the particles are too fragmented? calculate the overall mean and covariance.
return beluga::estimate(
beluga::views::states(particles), // namespace beluga
beluga::views::weights(particles));
return beluga::estimate(states, weights);
}

const auto [_, mean, covariance] =
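The net effect of the header changes above is that cluster_based_estimate and ParticleClusterizer::operator() now take separate state and weight ranges instead of a single particle range. The snippet below is a minimal, hypothetical usage sketch of the refactored free function; it is not part of the commit, and the includes, the Sophus::SE2d state type, and the container choices are assumptions based on the tests in this diff.

// Hypothetical usage sketch (not part of the commit).
#include <vector>

#include <beluga/algorithm/cluster_based_estimation.hpp>
#include <sophus/se2.hpp>  // assumed header for Sophus::SE2d

int main() {
  // Assumed particle representation: planar SE(2) states with scalar weights.
  std::vector<Sophus::SE2d> states = {
      Sophus::SE2d{Sophus::SO2d{0.0}, Eigen::Vector2d{1.0, 1.0}},
      Sophus::SE2d{Sophus::SO2d{0.0}, Eigen::Vector2d{1.1, 0.9}},
      Sophus::SE2d{Sophus::SO2d{0.1}, Eigen::Vector2d{0.9, 1.1}}};
  std::vector<double> weights = {0.5, 0.3, 0.2};

  // After this commit, the state and weight ranges are passed separately;
  // the result is a (mean, covariance) pair for the heaviest cluster.
  const auto [mean, covariance] = beluga::cluster_based_estimate(states, weights);
  (void)mean;
  (void)covariance;
  return 0;
}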
21 changes: 12 additions & 9 deletions beluga/test/beluga/algorithm/test_cluster_based_estimation.cpp
@@ -16,6 +16,7 @@

#include <beluga/algorithm/cluster_based_estimation.hpp>
#include <beluga/algorithm/spatial_hash.hpp>
#include <beluga/views.hpp>
#include <range/v3/action/sort.hpp>
#include <range/v3/action/unique.hpp>
#include <range/v3/view/filter.hpp>
@@ -55,11 +56,11 @@ struct ClusterBasedEstimationDetailTesting : public ::testing::Test {
[[nodiscard]] auto generate_test_grid_cell_data_map() const {
constexpr auto kUpperLimit = 30.0;

typename clusterizer_detail::Map<SE2d> map;
typename clusterizer_detail::ClusterMap<SE2d> map;
for (double x = 0.0; x < kUpperLimit; x += 1.0) {
const auto weight = x;
const auto state = SE2d{SO2d{0.}, Vector2d{x, x}};
map.emplace(spatial_hash_function(state), clusterizer_detail::Cell<SE2d>{state, weight, 0, std::nullopt});
map.emplace(spatial_hash_function(state), clusterizer_detail::ClusterCell<SE2d>{state, weight, 0, std::nullopt});
}
return map;
}
@@ -161,7 +162,7 @@ TEST_F(ClusterBasedEstimationDetailTesting, MakePriorityQueue) {
auto data = generate_test_grid_cell_data_map();

// test proper
auto prio_queue = make_priority_queue(data, &clusterizer_detail::Cell<SE2d>::weight);
auto prio_queue = make_priority_queue(data, &clusterizer_detail::ClusterCell<SE2d>::weight);
EXPECT_EQ(prio_queue.size(), data.size());

// from there on the weights should be strictly decreasing
@@ -187,11 +188,11 @@ TEST_F(ClusterBasedEstimationDetailTesting, MapGridCellsToClustersStep) {
}
}

typename clusterizer_detail::Map<SE2d> map;
typename clusterizer_detail::ClusterMap<SE2d> map;

for (const auto& [x, y, w] : coordinates) {
const auto state = SE2d{SO2d{0.}, Vector2d{x, y}};
map.emplace(spatial_hash_function(state), clusterizer_detail::Cell<SE2d>{state, w, 0, std::nullopt});
map.emplace(spatial_hash_function(state), clusterizer_detail::ClusterCell<SE2d>{state, w, 0, std::nullopt});
}

const auto neighbors = [&](const auto& state) {
@@ -212,7 +213,7 @@ TEST_F(ClusterBasedEstimationDetailTesting, MapGridCellsToClustersStep) {
// only cells beyond the 10% weight percentile to avoid the messy border
// between clusters beneath that threshold
const auto ten_percent_threshold = calculate_percentile_threshold(
map | ranges::views::values | ranges::views::transform(&clusterizer_detail::Cell<SE2d>::weight), 0.15);
map | ranges::views::values | ranges::views::transform(&clusterizer_detail::ClusterCell<SE2d>::weight), 0.15);

clusterizer_detail::assign_clusters(map, neighbors);

@@ -294,7 +295,8 @@ TEST_F(ClusterBasedEstimationDetailTesting, ClusterStateEstimationStep) {
auto particles = make_particle_multicluster_dataset(0.0, k_field_side, 0.0, k_field_side, 1.0);

const auto clusters =
ParticleClusterizer{ParticleClusterizerParam{kSpatialHashResolution, kAngularHashResolution, 0.9}}(particles);
ParticleClusterizer{ParticleClusterizerParam{kSpatialHashResolution, kAngularHashResolution, 0.9}}(
beluga::views::states(particles), beluga::views::weights(particles));

auto per_cluster_estimates =
estimate_clusters(beluga::views::states(particles), beluga::views::weights(particles), clusters);
@@ -369,7 +371,8 @@ TEST_F(ClusterBasedEstimationDetailTesting, HeaviestClusterSelectionTest) {

const auto [expected_pose, expected_covariance] = beluga::estimate(max_peak_masked_states, max_peak_masked_weights);

const auto [pose, covariance] = beluga::cluster_based_estimate(particles);
const auto [pose, covariance] =
beluga::cluster_based_estimate(beluga::views::states(particles), beluga::views::weights(particles));

ASSERT_THAT(pose, SE2Near(expected_pose.so2(), expected_pose.translation(), kTolerance));
ASSERT_NEAR(covariance(0, 0), expected_covariance(0, 0), 0.001);
@@ -398,7 +401,7 @@ TEST_F(ClusterBasedEstimationDetailTesting, NightmareDistributionTest) {
const auto [expected_pose, expected_covariance] = beluga::estimate(states, weights);

auto particles = ranges::views::zip(states, weights) | ranges::to<std::vector>();
const auto [pose, covariance] = beluga::cluster_based_estimate(particles);
const auto [pose, covariance] = beluga::cluster_based_estimate(states, weights);

ASSERT_THAT(pose, SE2Near(expected_pose.so2(), expected_pose.translation(), kTolerance));
ASSERT_NEAR(covariance(0, 0), expected_covariance(0, 0), 0.001);
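The updated test call sites above also show the clusterizer itself being invoked with separate state and weight ranges. The sketch below illustrates that call pattern directly; it is hypothetical and not part of the commit, and the includes, the state type, and the meaning of the parameter values are assumptions inferred from the test fixture in this diff.

// Hypothetical sketch (not part of the commit): invoking the clusterizer directly,
// mirroring the ClusterStateEstimationStep test above.
#include <vector>

#include <beluga/algorithm/cluster_based_estimation.hpp>
#include <sophus/se2.hpp>  // assumed header for Sophus::SE2d

int main() {
  std::vector<Sophus::SE2d> states = {
      Sophus::SE2d{Sophus::SO2d{0.0}, Eigen::Vector2d{0.0, 0.0}},
      Sophus::SE2d{Sophus::SO2d{0.0}, Eigen::Vector2d{0.1, 0.0}},
      Sophus::SE2d{Sophus::SO2d{0.0}, Eigen::Vector2d{5.0, 5.0}}};
  std::vector<double> weights = {0.4, 0.4, 0.2};

  // Assumed parameter meaning, following the test fixture: spatial hash resolution,
  // angular hash resolution, and the weight-cap percentile.
  beluga::ParticleClusterizer clusterizer{beluga::ParticleClusterizerParam{1.0, 0.5, 0.9}};

  // After this commit, the call takes separate state and weight ranges and
  // returns one cluster id per particle.
  const auto clusters = clusterizer(states, weights);
  (void)clusters;
  return 0;
}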
