Skip to content

Commit

Permalink
Merge branch 'recup_disp_min_disp_max' into 'release'
Browse files Browse the repository at this point in the history
feat: récupération des disp min et disp max dans les datasets

See merge request 3d/PandoraBox/pandora!374
  • Loading branch information
duboise-cnes committed Feb 13, 2025
2 parents 654caf8 + e021efd commit 62c098a
Show file tree
Hide file tree
Showing 13 changed files with 406 additions and 131 deletions.
5 changes: 5 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -211,3 +211,8 @@ clean-docker: ## clean docker image
@@[ "${CHECK_DOCKER}" ] || ( echo ">> docker not found"; exit 1 )
@echo "Clean Docker image cnes/pandora dev"
@docker image rm cnes/pandora:dev

# Run the C++ unit tests through Meson, inside the project's virtualenv.
# NOTE(review): `build/$(shell ls build)` assumes there is exactly ONE
# sub-directory under build/ (the Meson build dir created by the install
# target) — TODO confirm this holds for multi-configuration checkouts.
.PHONY: test-unit-cpp
test-unit-cpp: install ## run unit cpp tests only for dev
	@echo "Run unit cpp tests"
	. ${PANDORA_VENV}/bin/activate; meson test -C build/$(shell ls build)/ -v
7 changes: 7 additions & 0 deletions meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@ meson.add_dist_script('version.sh', 'set-dist', meson.project_version())
py = import('python').find_installation(pure: false)

pybind11_dep = dependency('pybind11')
doctest_dep = dependency('doctest')
python_dep = dependency('python3-embed')


install_subdir(
'pandora',
Expand Down Expand Up @@ -44,4 +47,8 @@ subdir(

subdir(
'pandora/matching_cost/cpp'
)

subdir(
'tests/test_cpp/'
)
3 changes: 2 additions & 1 deletion pandora/cost_volume_confidence/cost_volume_confidence.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,8 @@ def allocate_confidence_map(
"""

# Add common prefix to confidence measure name
name_confidence_measure = "confidence_from_" + name_confidence_measure
if "disp_min" not in name_confidence_measure and "disp_max" not in name_confidence_measure:
name_confidence_measure = "confidence_from_" + name_confidence_measure

if cv is not None:
# cost volume already contains a confidence map, it must be updated
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,9 @@ def compute_risk_and_sampled_risk(cv, sampled_ambiguity, etas, nbr_etas, grids,
:type disparity_range: np.ndarray
:param sample_risk: whether or not to compute and return the sampled risk
:type sample_risk: bool
:return: the risk and sampled risk if asked
:return: the risk, the minimum and maximum disparity maps, and the sampled risks if requested
:rtype: Tuple(2D np.ndarray (row, col) dtype = float32, 2D np.ndarray (row, col) dtype = float32, \
2D np.ndarray (row, col) dtype = float32, 2D np.ndarray (row, col) dtype = float32, \
3D np.ndarray (row, col) dtype = float32, 3D np.ndarray (row, col) dtype = float32)
"""
return None, None
return None, None, None, None
Original file line number Diff line number Diff line change
Expand Up @@ -38,5 +38,13 @@ namespace py = pybind11;
* @return the index where value should be inserted
*/
size_t searchsorted(const py::array_t<float>& array, float value);

std::tuple<float, float,
pybind11::detail::unchecked_mutable_reference<float, 2>,
pybind11::detail::unchecked_mutable_reference<float, 2>>
min_max_cost(
py::detail::unchecked_reference<float, 3> r_cv,
size_t n_row,
size_t n_col,
size_t n_disp
);
#endif // COST_VOLUME_CONFIDENCE_TOOLS_HPP
53 changes: 11 additions & 42 deletions pandora/cost_volume_confidence/cpp/src/ambiguity.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -64,38 +64,7 @@ py::list compute_ambiguity_and_sampled_ambiguity(
to_return.append(samp_amb);
}

py::array_t<float> min_img = py::array_t<float>({n_row, n_col});
py::array_t<float> max_img = py::array_t<float>({n_row, n_col});
auto rw_min_img = min_img.mutable_unchecked<2>();
auto rw_max_img = max_img.mutable_unchecked<2>();

// Minimum and maximum of all costs, useful to normalize the cost volume
float min_cost = std::numeric_limits<float>::infinity();
float max_cost = -std::numeric_limits<float>::infinity();
for (int i = 0; i < n_row; ++i) {
for (int j = 0; j < n_col; ++j) {
float pix_min_cost = std::numeric_limits<float>::infinity();
float pix_max_cost = -std::numeric_limits<float>::infinity();
bool insert_nan = true;
for (int k = 0; k < n_disp; ++k) {
float val = r_cv(i,j,k);
if ( !std::isnan(val) ) {
insert_nan = false;
pix_min_cost = std::min(pix_min_cost, val);
pix_max_cost = std::max(pix_max_cost, val);
}
}
if (insert_nan) {
rw_min_img(i, j) = std::numeric_limits<float>::quiet_NaN();
rw_max_img(i, j) = std::numeric_limits<float>::quiet_NaN();
continue;
}
rw_min_img(i, j) = pix_min_cost;
rw_max_img(i, j) = pix_max_cost;
min_cost = std::min(min_cost, pix_min_cost);
max_cost = std::max(max_cost, pix_max_cost);
}
}
auto [min_cost, max_cost, rw_min_img, rw_max_img] = min_max_cost(r_cv, n_row, n_col, n_disp);

float extremum_cost = type_measure_min ? min_cost : max_cost;
float diff_cost = max_cost - min_cost;
Expand All @@ -109,11 +78,11 @@ py::list compute_ambiguity_and_sampled_ambiguity(
float cv_val;
float amb_sum = 0;
bool amb_status;
for (int row = 0; row < n_row; ++row) {
for (int col = 0; col < n_col; ++col) {
for (size_t row = 0; row < n_row; ++row) {
for (size_t col = 0; col < n_col; ++col) {

// Normalized extremum cost for one point
norm_extremum = type_measure_min*(rw_min_img(row, col) - extremum_cost) / diff_cost
norm_extremum = type_measure_min*(rw_min_img(row, col) - extremum_cost) / diff_cost
+ !type_measure_min*(rw_max_img(row, col) - extremum_cost) / diff_cost;

// If all costs are at nan, set the maximum value of the ambiguity for this point
Expand All @@ -131,10 +100,10 @@ py::list compute_ambiguity_and_sampled_ambiguity(
// fill normalized cv for this pixel (+-inf when encountering NaNs)
int nb_minfs = 0;
int nb_pinfs = 0;
for (int disp = 0; disp < n_disp; ++disp) {
for (size_t disp = 0; disp < n_disp; ++disp) {
cv_val = r_cv(row, col, disp);
if (std::isnan(cv_val)) {
// Mask nan to -inf/inf to increase/decrease the value of the ambiguity
// Mask nan to -inf/inf to increase/decrease the value of the ambiguity
// if a point contains nan costs
if (disp >= idx_disp_min && disp < idx_disp_max) {
normalized_pix_costs[disp] = -std::numeric_limits<float>::infinity();
Expand All @@ -143,7 +112,7 @@ py::list compute_ambiguity_and_sampled_ambiguity(
else {
normalized_pix_costs[disp] = std::numeric_limits<float>::infinity();
nb_pinfs++;
}
}
continue;
}
normalized_pix_costs[disp] = (cv_val - extremum_cost) / diff_cost;
Expand All @@ -152,9 +121,9 @@ py::list compute_ambiguity_and_sampled_ambiguity(
// fill sampled ambiguity, compute integral
amb_sum = 0;
if (type_measure_min) {
for (int eta = 0; eta < nbr_etas; ++eta) {
for (size_t eta = 0; eta < nbr_etas; ++eta) {
float amb_eta_sum = 0;
for (int disp = 0; disp < n_disp; ++disp) {
for (size_t disp = 0; disp < n_disp; ++disp) {
amb_status = normalized_pix_costs[disp] <= (norm_extremum + r_etas(eta));
amb_eta_sum += amb_status ? 1.f : 0.f;
}
Expand All @@ -163,8 +132,8 @@ py::list compute_ambiguity_and_sampled_ambiguity(
rw_samp_amb->operator()(row, col, eta) = amb_eta_sum;
}
} else {
for (int eta = 0; eta < nbr_etas; ++eta) {
for (int disp = 0; disp < n_disp; ++disp) {
for (size_t eta = 0; eta < nbr_etas; ++eta) {
for (size_t disp = 0; disp < n_disp; ++disp) {
amb_status = normalized_pix_costs[disp] >= (norm_extremum - r_etas(eta));
amb_sum += amb_status ? 1.f : 0.f;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,51 @@ size_t searchsorted(const py::array_t<float>& array, float value) {
}
return left;
}

/**
 * @brief Compute the per-pixel minimum/maximum cost images of a cost volume,
 * together with the global minimum and maximum over all finite costs
 * (the global extrema are used by callers to normalize the cost volume).
 *
 * A pixel whose costs are NaN for every disparity gets NaN in both output
 * images and does not contribute to the global extrema.
 *
 * @param r_cv read-only accessor on the cost volume, indexed (row, col, disp)
 * @param n_row number of rows of the cost volume
 * @param n_col number of columns of the cost volume
 * @param n_disp number of disparities of the cost volume
 * @return tuple (global min cost, global max cost,
 *                per-pixel min image accessor, per-pixel max image accessor)
 *
 * @warning NOTE(review): the returned unchecked_mutable_reference proxies do
 * NOT own their storage — `min_img` and `max_img` are locals of this function
 * and are destroyed when it returns, so the two returned accessors reference
 * released array storage (pybind11 documents that unchecked proxies must not
 * outlive the array they were created from). Consider returning the
 * py::array_t objects themselves and taking the accessors at the call site —
 * TODO confirm with the callers in ambiguity.cpp / risk.cpp.
 */
std::tuple<float, float,
           pybind11::detail::unchecked_mutable_reference<float, 2>,
           pybind11::detail::unchecked_mutable_reference<float, 2>>
min_max_cost(
    py::detail::unchecked_reference<float, 3> r_cv,
    size_t n_row,
    size_t n_col,
    size_t n_disp
){
    // Per-pixel extremum images (row, col)
    py::array_t<float> min_img = py::array_t<float>({n_row, n_col});
    py::array_t<float> max_img = py::array_t<float>({n_row, n_col});
    auto rw_min_img = min_img.mutable_unchecked<2>();
    auto rw_max_img = max_img.mutable_unchecked<2>();

    // Global extrema over all finite costs of the volume
    float min_cost = std::numeric_limits<float>::infinity();
    float max_cost = -std::numeric_limits<float>::infinity();
    float pix_min_cost;
    float pix_max_cost;
    float val;
    bool insert_nan;
    for (size_t i = 0; i < n_row; ++i) {
        for (size_t j = 0; j < n_col; ++j) {
            pix_min_cost = std::numeric_limits<float>::infinity();
            pix_max_cost = -std::numeric_limits<float>::infinity();
            // stays true only if every disparity cost at (i, j) is NaN
            insert_nan = true;
            for (size_t k = 0; k < n_disp; ++k) {
                val = r_cv(i,j,k);
                if ( !std::isnan(val) ) {
                    insert_nan = false;
                    pix_min_cost = std::min(pix_min_cost, val);
                    pix_max_cost = std::max(pix_max_cost, val);
                }
            }
            if (insert_nan) {
                // Fully invalid pixel: NaN outputs, global extrema untouched
                rw_min_img(i, j) = std::numeric_limits<float>::quiet_NaN();
                rw_max_img(i, j) = std::numeric_limits<float>::quiet_NaN();
                continue;
            }
            rw_min_img(i, j) = pix_min_cost;
            rw_max_img(i, j) = pix_max_cost;
            min_cost = std::min(min_cost, pix_min_cost);
            max_cost = std::max(max_cost, pix_max_cost);

        }
    }
    return std::make_tuple(min_cost, max_cost, rw_min_img, rw_max_img);
}
90 changes: 47 additions & 43 deletions pandora/cost_volume_confidence/cpp/src/risk.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
#include <algorithm>
#include <numeric>
#include <cmath>
#include <tuple>

namespace py = pybind11;

Expand Down Expand Up @@ -52,6 +53,11 @@ py::list compute_risk_and_sampled_risk(
auto rw_risk_min = risk_min.mutable_unchecked<2>();
auto rw_risk_max = risk_max.mutable_unchecked<2>();

py::array_t<float> risk_disp_inf = py::array_t<float>({n_row, n_col});
py::array_t<float> risk_disp_sup = py::array_t<float>({n_row, n_col});
auto rw_risk_disp_inf = risk_disp_inf.mutable_unchecked<2>();
auto rw_risk_disp_sup = risk_disp_sup.mutable_unchecked<2>();

py::array_t<float> samp_risk_min;
py::array_t<float> samp_risk_max;
std::unique_ptr<py::detail::unchecked_mutable_reference<float, 3>> rw_samp_risk_min;
Expand All @@ -69,41 +75,13 @@ py::list compute_risk_and_sampled_risk(
);
}

py::array_t<float> min_img = py::array_t<float>({n_row, n_col});
auto rw_min_img = min_img.mutable_unchecked<2>();

// min and max cost for normalization
float min_cost = std::numeric_limits<float>::infinity();
float max_cost = -std::numeric_limits<float>::infinity();
float pix_min_cost;
float val;
bool insert_nan;
for (int i = 0; i < n_row; ++i) {
for (int j = 0; j < n_col; ++j) {
pix_min_cost = std::numeric_limits<float>::infinity();
insert_nan = true;
for (int k = 0; k < n_disp; ++k) {
val = r_cv(i,j,k);
if ( !std::isnan(val) ) {
insert_nan = false;
pix_min_cost = std::min(pix_min_cost, val);
max_cost = std::max(max_cost, val);
}
}
if (insert_nan) {
rw_min_img(i, j) = std::numeric_limits<float>::quiet_NaN();
continue;
}
rw_min_img(i, j) = pix_min_cost;
min_cost = std::min(min_cost, pix_min_cost);
}
}
auto [min_cost, max_cost, rw_min_img, _] = min_max_cost(r_cv, n_row, n_col, n_disp);

float diff_cost = max_cost - min_cost;

float* normalized_pix_costs = new float[n_disp];
for (int row = 0; row < n_row; ++row) {
for (int col = 0; col < n_col; ++col) {
for (size_t row = 0; row < n_row; ++row) {
for (size_t col = 0; col < n_col; ++col) {

// Normalized extremum cost for one point
float normalized_extremum = (rw_min_img(row, col) - min_cost) / diff_cost;
Expand All @@ -112,9 +90,12 @@ py::list compute_risk_and_sampled_risk(
if (std::isnan(normalized_extremum)) {
rw_risk_min(row, col) = std::numeric_limits<float>::quiet_NaN();
rw_risk_max(row, col) = std::numeric_limits<float>::quiet_NaN();
if (!sample_risk)

rw_risk_disp_inf(row, col) = std::numeric_limits<float>::quiet_NaN();
rw_risk_disp_sup(row, col) = std::numeric_limits<float>::quiet_NaN();
if (!sample_risk)
continue;

for (size_t eta = 0; eta < nbr_etas; ++eta) {
rw_samp_risk_min->operator()(
row, col, eta
Expand All @@ -130,37 +111,55 @@ py::list compute_risk_and_sampled_risk(
size_t idx_disp_max = searchsorted(disparity_range, r_grids(1, row, col)) + 1;

// fill normalized cv for this pixel (mask with +-inf when encountering NaNs)
for (int disp = 0; disp < n_disp; ++disp) {
for (size_t disp = 0; disp < n_disp; ++disp) {
float cv_val = r_cv(row, col, disp);
if (std::isnan(cv_val)) {
if (disp >= idx_disp_min && disp < idx_disp_max) {
normalized_pix_costs[disp] = -std::numeric_limits<float>::infinity();
}
else {
normalized_pix_costs[disp] = std::numeric_limits<float>::infinity();
}
}
continue;
}
normalized_pix_costs[disp] = (cv_val - min_cost) / diff_cost;
}

float sum_for_min = 0;
float sum_for_max = 0;
for (int eta = 0; eta < nbr_etas; ++eta) {
float sum_for_disp_inf = 0;
float sum_for_disp_sup = 0;
for (size_t eta = 0; eta < nbr_etas; ++eta) {
// Obtain min and max disparities for each sample
float min_disp = std::numeric_limits<float>::infinity();
float max_disp = -std::numeric_limits<float>::infinity();
for (int disp = 0; disp < n_disp; ++disp) {
float min_disp_idx = std::numeric_limits<float>::infinity();
float max_disp_idx = -std::numeric_limits<float>::infinity();
for (size_t disp = 0; disp < n_disp; ++disp) {
if (normalized_pix_costs[disp] > (normalized_extremum + r_etas(eta)))
continue;

min_disp = std::min(min_disp, static_cast<float>(disp));
max_disp = std::max(max_disp, static_cast<float>(disp));
min_disp_idx = std::min(min_disp_idx, static_cast<float>(disp));
max_disp_idx = std::max(max_disp_idx, static_cast<float>(disp));
}

int min_index = static_cast<int>(min_disp_idx);
int max_index = static_cast<int>(max_disp_idx);

float min_disp = std::numeric_limits<float>::infinity();
float max_disp = -std::numeric_limits<float>::infinity();
py::buffer_info buf_info = disparity_range.request();
float* data_ptr = static_cast<float*>(buf_info.ptr);

min_disp = data_ptr[min_index];
max_disp = data_ptr[max_index];
// add sampled max risk to sum
float eta_max_disp = max_disp - min_disp;
float eta_max_disp = max_disp_idx - min_disp_idx;
// add sampled min risk to sum. risk min is defined as ( (1+risk(p,k)) - amb(p,k) )
float eta_min_disp = 1 + eta_max_disp - r_samp_amb(row, col, eta);

// add sampled min and max disp to sum
sum_for_disp_sup += max_disp;
sum_for_disp_inf += min_disp;

sum_for_min += eta_min_disp;
sum_for_max += eta_max_disp;
if (sample_risk) {
Expand All @@ -174,16 +173,21 @@ py::list compute_risk_and_sampled_risk(
rw_risk_min(row, col) = sum_for_min / nbr_etas;
rw_risk_max(row, col) = sum_for_max / nbr_etas;

rw_risk_disp_sup(row, col) = sum_for_disp_sup / nbr_etas;
rw_risk_disp_inf(row, col) = sum_for_disp_inf /nbr_etas;

}
}

delete[] normalized_pix_costs;

to_return.append( risk_max );
to_return.append( risk_min );
to_return.append( risk_disp_sup );
to_return.append( risk_disp_inf );
if (sample_risk) {
to_return.append( samp_risk_max );
to_return.append( samp_risk_min );
}
return to_return;
return to_return;
}
Loading

0 comments on commit 62c098a

Please sign in to comment.