From 100e43564b2098767243c2b8d4982cbaba6845be Mon Sep 17 00:00:00 2001
From: amice
Date: Wed, 4 Dec 2024 14:33:34 -0500
Subject: [PATCH] rewrite python bindings

---
 .../solvers/solvers_py_mathematicalprogram.cc |  94 +++++++----
 geometry/optimization/convex_set.cc           | 149 +++++-------------
 geometry/optimization/graph_of_convex_sets.cc |   6 +-
 solvers/solve.cc                              |  18 ++-
 4 files changed, 123 insertions(+), 144 deletions(-)

diff --git a/bindings/pydrake/solvers/solvers_py_mathematicalprogram.cc b/bindings/pydrake/solvers/solvers_py_mathematicalprogram.cc
index 896c3d02e6f1..2c0ec1d87389 100644
--- a/bindings/pydrake/solvers/solvers_py_mathematicalprogram.cc
+++ b/bindings/pydrake/solvers/solvers_py_mathematicalprogram.cc
@@ -1637,7 +1637,7 @@ void BindFreeFunctions(py::module m) {
       // Inside of the lambda we'll demote nullopts back to nullptrs. Note
       // that SolverOptions is not necessarily cheap to copy, so we still
       // carefully accept it by-pointer. The VectorXd is always necessarily
-      // copied when going form numpy to Eigen so we still pass it by-value.
+      // copied when going from numpy to Eigen so we still pass it by-value.
       // TODO(Alexandre.Amice) change to generator form.
       [](std::vector<const MathematicalProgram*> progs,
           std::optional<std::vector<std::optional<Eigen::VectorXd>>>
@@ -1646,25 +1646,41 @@
               solver_options,
           std::optional<std::vector<std::optional<SolverId>>> solver_ids,
           const Parallelism& parallelism, bool dynamic_schedule) {
-        std::vector<const Eigen::VectorXd*> initial_guesses_ptrs;
-        if (initial_guesses.has_value()) {
-          initial_guesses_ptrs.reserve(initial_guesses->size());
-          for (const auto& guess : *initial_guesses) {
-            initial_guesses_ptrs.push_back(guess ? &(*guess) : nullptr);
+        const std::function<const MathematicalProgram*(int64_t, int64_t)>
+            progs_generator =
+                [&progs](int64_t, int64_t i) -> const MathematicalProgram* {
+          return progs.at(i);
+        };
+        auto initial_guess_generator =
+            [&initial_guesses](
+                int64_t, int64_t i) -> std::optional<Eigen::VectorXd> {
+          if (initial_guesses.has_value()) {
+            return initial_guesses->at(i);
+          } else {
+            return std::nullopt;
           }
-        }
-        std::vector<const SolverOptions*> solver_options_ptrs;
-        if (solver_options.has_value()) {
-          solver_options_ptrs.reserve(solver_options->size());
-          for (const auto& option : *solver_options) {
-            solver_options_ptrs.push_back(option ? *option : nullptr);
+        };
+        auto solver_options_generator =
+            [&solver_options](
+                int64_t, int64_t i) -> std::optional<SolverOptions> {
+          if (solver_options.has_value() &&
+              *(solver_options->at(i)) != nullptr) {
+            return **(solver_options->at(i));
+          } else {
+            return std::nullopt;
           }
-        }
-        return solvers::SolveInParallel(progs,
-            initial_guesses.has_value() ? &initial_guesses_ptrs : nullptr,
-            solver_options.has_value() ? &solver_options_ptrs : nullptr,
-            solver_ids.has_value() ? &(*solver_ids) : nullptr, parallelism,
-            dynamic_schedule);
+        };
+        auto solver_ids_generator =
+            [&solver_ids](int64_t, int64_t i) -> std::optional<SolverId> {
+          if (solver_ids.has_value()) {
+            return solver_ids->at(i);
+          } else {
+            return std::nullopt;
+          }
+        };
+        return solvers::SolveInParallel(progs_generator, 0, ssize(progs),
+            initial_guess_generator, solver_options_generator,
+            solver_ids_generator, parallelism, dynamic_schedule);
       },
       py::arg("progs"), py::arg("initial_guesses") = std::nullopt,
       py::arg("solver_options") = std::nullopt,
@@ -1683,16 +1699,37 @@ void BindFreeFunctions(py::module m) {
           const std::optional<SolverId>& solver_id,
           const Parallelism& parallelism, bool dynamic_schedule) {
         // TODO(Alexandre.Amice) change to generator form.
-        std::vector<const Eigen::VectorXd*> initial_guesses_ptrs;
-        if (initial_guesses.has_value()) {
-          initial_guesses_ptrs.reserve(initial_guesses->size());
-          for (const auto& guess : *initial_guesses) {
-            initial_guesses_ptrs.push_back(guess ? &(*guess) : nullptr);
+        // std::vector<const Eigen::VectorXd*>
+        // initial_guesses_ptrs; if (initial_guesses.has_value())
+        // {
+        //   initial_guesses_ptrs.reserve(initial_guesses->size());
+        //   for (const auto& guess : *initial_guesses) {
+        //   initial_guesses_ptrs.push_back(guess ? &(*guess) :
+        //   nullptr);
+        //   }
+        // }
+        // return solvers::SolveInParallel(progs,
+        //     initial_guesses.has_value() ?
+        //     &initial_guesses_ptrs : nullptr, solver_options,
+        //     solver_id, parallelism, dynamic_schedule);
+
+        const std::function<const MathematicalProgram*(int64_t, int64_t)>
+            progs_generator =
+                [&progs](int64_t, int64_t i) -> const MathematicalProgram* {
+          return progs.at(i);
+        };
+        auto initial_guess_generator =
+            [&initial_guesses](
+                int64_t, int64_t i) -> std::optional<Eigen::VectorXd> {
+          if (initial_guesses.has_value()) {
+            return initial_guesses->at(i);
+          } else {
+            return std::nullopt;
           }
-        }
-        return solvers::SolveInParallel(progs,
-            initial_guesses.has_value() ? &initial_guesses_ptrs : nullptr,
-            solver_options, solver_id, parallelism, dynamic_schedule);
+        };
+        return solvers::SolveInParallel(progs_generator, 0, ssize(progs),
+            initial_guess_generator, solver_options, solver_id, parallelism,
+            dynamic_schedule);
       },
       py::arg("progs"), py::arg("initial_guesses") = std::nullopt,
       py::arg("solver_options") = std::nullopt,
@@ -1702,6 +1739,9 @@ void BindFreeFunctions(py::module m) {
       py::call_guard<py::gil_scoped_release>(),
       doc.SolveInParallel
           .doc_6args_conststdvector_conststdvector_constdrakesolversSolverOptions_conststdoptional_drakeParallelism_bool);
+  // N.B. Do NOT bind the generator forms of SolveInParallel directly. Callbacks
+  // to Python state from parallel C++ is difficult and likely to lead to
+  // deadlocks.
 }

 }  // namespace
diff --git a/geometry/optimization/convex_set.cc b/geometry/optimization/convex_set.cc
index a71c4409e534..409cb7a4a4cc 100644
--- a/geometry/optimization/convex_set.cc
+++ b/geometry/optimization/convex_set.cc
@@ -122,132 +122,67 @@ bool IsBoundedParallel(const ConvexSet& s, Parallelism parallelism) {
     ConstructEmptyBoundednessProgram(&(progs[i]), s);
   }

-  // Pre-allocate empty MathematicalProgramResults for each thread.
-  std::vector<MathematicalProgramResult> results(parallelism.num_threads());
-
-  // Pre-allocate solver instances.
-  const solvers::SolverId solver_id = solvers::ChooseBestSolver(progs[0]);
-  std::vector<std::unique_ptr<solvers::SolverInterface>> solver_interfaces(
-      parallelism.num_threads());
-
-  // Pre-allocate the solver options.
-  std::vector<solvers::SolverOptions> options(parallelism.num_threads());
-
-  for (int i = 0; i < parallelism.num_threads(); ++i) {
-    solver_interfaces[i] = solvers::MakeSolver(solver_id);
-    options[i].SetOption(solvers::CommonSolverOption::kMaxThreads, 1);
-  }
-
   std::atomic<bool> certified_unbounded = false;

+  auto index_to_dimension = [&](const int64_t i) -> int {
+    return i / 2;
+  };
+
   // This worker lambda maps the index i to a dimension and direction to check
-  // boundedness. If unboundedness has already been verified, it just exits
-  // early. If unboundedness is verified in this iteration, certified_unbounded
-  // will be updated to reflect that fact. For a given index i, the dimension is
-  // int(i / 2), and if i % 2 == 0, then we maximize, otherwise, we minimize.
-  auto solve_ith = [&](const int thread_num, const int64_t i) {
+  // boundedness. If unboundedness has already been verified, it returns
+  // nullptr. If unboundedness is verified in this iteration,
+  // certified_unbounded will be updated to reflect that fact. For a given index
+  // i, the dimension is int(i / 2), and if i % 2 == 0, then we maximize,
+  // otherwise, we minimize.
+  auto make_ith = [&](int64_t thread_num, int64_t i) -> MathematicalProgram* {
     if (certified_unbounded.load()) {
-      return;
+      // Exit early if unboundedness has already been verified.
+      return nullptr;
     }

-    const int dimension = i / 2;
+    const int dimension = index_to_dimension(i);
     bool maximize = i % 2 == 0;

     // Update the objective vector. By construction, each MathematicalProgram
     // has one cost (the linear cost).
     progs[thread_num].linear_costs()[0].evaluator()->update_coefficient_entry(
         dimension, maximize ? -1 : 1);
-    DRAKE_ASSERT(progs[thread_num].IsThreadSafe());
-    solver_interfaces[thread_num]->Solve(progs[thread_num], std::nullopt,
-                                         options[thread_num],
-                                         &(results[thread_num]));
-    if (ProgramResultImpliesUnbounded(results[thread_num])) {
-      certified_unbounded.store(true);
-    }
-
-    // Reset the objective vector.
-    progs[thread_num].linear_costs()[0].evaluator()->update_coefficient_entry(
-        dimension, 0);
+    return &(progs[thread_num]);
   };

+  std::function<void(MathematicalProgram**, const MathematicalProgramResult&, int64_t, int64_t)>
+      teardown_ith = [&](MathematicalProgram**,
+                         const MathematicalProgramResult& result_i,
+                         int64_t thread_num, int64_t i) {
+        const int dimension = index_to_dimension(i);
+        progs[thread_num]
+            .linear_costs()[0]
+            .evaluator()
+            ->update_coefficient_entry(dimension, 0);
+        if (ProgramResultImpliesUnbounded(result_i)) {
+          certified_unbounded.store(true);
+        }
+      };
+
   // We run the loop from 0 to 2 * s.ambient_dimension(), since two programs are
   // solved for each dimension (maximizing and minimizing). All programs are the
   // same size, so static scheduling is appropriate.
-  StaticParallelForIndexLoop(DegreeOfParallelism(parallelism.num_threads()), 0,
-                             2 * s.ambient_dimension(), solve_ith,
-                             ParallelForBackend::BEST_AVAILABLE);
-
+  std::vector<MathematicalProgramResult> results =
+      SolveInParallel(
+          make_ith,  // prog_generator
+          0,  // range_start
+          2 * s.ambient_dimension(),  // range_end
+          [](int64_t, int64_t) -> std::optional<Eigen::VectorXd> {
+            return std::nullopt;
+          },  // initial_guesses_generator
+          nullptr,  // solver_options
+          std::nullopt,  // solver_id
+          Parallelism::Max(),  // parallelism
+          false,  // dynamic_schedule
+          &teardown_ith  // prog_teardown
+          );  // NOLINT
   return !certified_unbounded.load();
 }

-//bool IsBoundedParallel2(const ConvexSet& s, Parallelism parallelism) {
-//  // Pre-allocate programs (which will be updated and solved within the parallel
-//  // loop).
-//  std::vector<MathematicalProgram> progs(parallelism.num_threads());
-//  for (int i = 0; i < ssize(progs); ++i) {
-//    ConstructEmptyBoundednessProgram(&(progs[i]), s);
-//  }
-//
-//  std::atomic<bool> certified_unbounded = false;
-//  auto index_to_dimension = [&](const int64_t i) -> int {
-//    return i / 2;
-//  };
-//
-//  // This worker lambda maps the index i to a dimension and direction to check
-//  // boundedness. If unboundedness has already been verified, it returns
-//  // nullptr. If unboundedness is verified in this iteration,
-//  // certified_unbounded will be updated to reflect that fact. For a given index
-//  // i, the dimension is int(i / 2), and if i % 2 == 0, then we maximize,
-//  // otherwise, we minimize.
-//  auto make_ith = [&](int64_t thread_num, int64_t i) -> MathematicalProgram* {
-//    if (certified_unbounded.load()) {
-//      // Exit early if unboundedness has already been verified.
-//      return nullptr;
-//    }
-//
-//    const int dimension = index_to_dimension(i);
-//    bool maximize = i % 2 == 0;
-//
-//    // Update the objective vector. By construction, each MathematicalProgram
-//    // has one cost (the linear cost).
-//    progs[thread_num].linear_costs()[0].evaluator()->update_coefficient_entry(
-//        dimension, maximize ? -1 : 1);
-//    return &(progs[thread_num]);
-//  };
-//
-//  std::function<void(MathematicalProgram**, const MathematicalProgramResult&, int64_t, int64_t)>
-//      teardown_ith = [&](MathematicalProgram**,
-//                         const MathematicalProgramResult& result_i,
-//                         int64_t thread_num, int64_t i) {
-//        const int dimension = index_to_dimension(i);
-//        progs[thread_num]
-//            .linear_costs()[0]
-//            .evaluator()
-//            ->update_coefficient_entry(dimension, 0);
-//        if (ProgramResultImpliesUnbounded(result_i)) {
-//          certified_unbounded.store(true);
-//        }
-//      };
-//
-//  // We run the loop from 0 to 2 * s.ambient_dimension(), since two programs are
-//  // solved for each dimension (maximizing and minimizing). All programs are the
-//  // same size, so static scheduling is appropriate.
-//  std::vector<MathematicalProgramResult> results =
-//      SolveInParallel(
-//          make_ith,  // prog_generator
-//          0,  // range_start
-//          2 * s.ambient_dimension(),  // range_end
-//          [](int64_t, int64_t) -> std::optional<Eigen::VectorXd> {
-//            return std::nullopt;
-//          },  // initial_guesses_generator
-//          nullptr,  // solver_options
-//          std::nullopt,  // solver_id
-//          Parallelism::Max(),  // parallelism
-//          false,  // dynamic_schedule
-//          &teardown_ith  // prog_teardown
-//          );  // NOLINT
-//  return !certified_unbounded.load();
-//}
 }  // namespace

 bool ConvexSet::GenericDoIsBounded(Parallelism parallelism) const {
diff --git a/geometry/optimization/graph_of_convex_sets.cc b/geometry/optimization/graph_of_convex_sets.cc
index 02a41e27f826..605e540841a3 100644
--- a/geometry/optimization/graph_of_convex_sets.cc
+++ b/geometry/optimization/graph_of_convex_sets.cc
@@ -826,13 +826,9 @@ std::set<EdgeId> GraphOfConvexSets::PreprocessShortestPath(
   // remainder of the function.
   int nE = edge_id_list.size();

-  // TODO(cohnt): Rewrite with a parallel for loop where each thread creates and
-  // solves the preprocessing program, to avoid having to use batching together
-  // with SolveInParallel.
-
+  // TODO(cohnt): Rewrite using generators to avoid batching.
   // Given an edge (u,v) check if a path from source to u and another from v to
   // target exist without sharing edges.
-
   int preprocessing_parallel_batch_size = 1000;
   int num_batches = 1 + (nE / preprocessing_parallel_batch_size);
   for (int batch_idx = 0; batch_idx < num_batches; ++batch_idx) {
diff --git a/solvers/solve.cc b/solvers/solve.cc
index 160c14a45f2e..c4c92005e189 100644
--- a/solvers/solve.cc
+++ b/solvers/solve.cc
@@ -11,6 +11,7 @@
 #include "drake/solvers/choose_best_solver.h"
 #include "drake/solvers/ipopt_solver.h"
 #include "drake/solvers/solver_interface.h"
+
 namespace drake {
 namespace solvers {
 using common_robotics_utilities::parallelism::DegreeOfParallelism;
@@ -187,12 +188,11 @@ std::vector<MathematicalProgramResult> SolveInParallel(
     bool dynamic_schedule,
     const std::function<void(MathematicalProgram**, const MathematicalProgramResult&,
                              int64_t, int64_t)>* prog_teardown) {
+  const std::optional<SolverOptions> returned_options =
+      solver_options == nullptr ? std::nullopt
+                                : std::optional<SolverOptions>{*solver_options};
   auto solver_options_generator =
-      [&solver_options](int64_t, int64_t) -> std::optional<SolverOptions> {
-    static const std::optional<SolverOptions> returned_options =
-        solver_options == nullptr
-            ? std::nullopt
-            : std::optional<SolverOptions>{*solver_options};
+      [&returned_options](int64_t, int64_t) -> std::optional<SolverOptions> {
     return returned_options;
   };
   auto solver_id_generator = [&solver_id](int64_t,
@@ -289,6 +289,14 @@ std::vector<MathematicalProgramResult> SolveInParallel(
     const SolverOptions* solver_options,
     const std::optional<SolverId>& solver_id, Parallelism parallelism,
     bool dynamic_schedule) {
+  DRAKE_THROW_UNLESS(
+      std::all_of(progs.begin(), progs.end(), [](const auto& prog) {
+        return prog != nullptr;
+      }));
+  if (initial_guesses != nullptr) {
+    DRAKE_THROW_UNLESS(initial_guesses->size() == progs.size());
+  }
+
   auto prog_generator = [&progs](const int,
                             const int64_t i) -> const MathematicalProgram* {
     return GetRawPointer(progs[i]);
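
For reference, the sketch below (not part of the patch) shows how a downstream C++ caller might use the generator-form SolveInParallel overload that this patch migrates to, following the same argument order and teardown pattern as the convex_set.cc hunk above. The toy program setup (the example namespace, MinimizeEachCoordinate, the box constraint, and the per-thread cost bindings) is illustrative only and is not code from Drake or from this patch.

// Sketch only: one reusable MathematicalProgram per thread, a prog_generator
// that rewrites the linear cost for index i, and a prog_teardown that resets
// it -- the same pattern IsBoundedParallel uses above. Assumes the generator
// overload signature shown in this patch.
#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

#include <Eigen/Dense>

#include "drake/common/parallelism.h"
#include "drake/solvers/mathematical_program.h"
#include "drake/solvers/solve.h"

namespace example {

using drake::Parallelism;
using drake::solvers::Binding;
using drake::solvers::LinearCost;
using drake::solvers::MathematicalProgram;
using drake::solvers::MathematicalProgramResult;
using drake::solvers::SolveInParallel;

// Minimizes x(i) over the box [-1, 1]^n for each i in [0, n), solving the n
// small LPs in parallel and returning one result per index.
std::vector<MathematicalProgramResult> MinimizeEachCoordinate(
    int n, Parallelism parallelism) {
  // Pre-allocate one program (and its mutable linear cost) per thread.
  std::vector<MathematicalProgram> progs(parallelism.num_threads());
  std::vector<Binding<LinearCost>> costs;
  for (int t = 0; t < parallelism.num_threads(); ++t) {
    auto x = progs[t].NewContinuousVariables(n, "x");
    progs[t].AddBoundingBoxConstraint(-1.0, 1.0, x);
    costs.push_back(progs[t].AddLinearCost(Eigen::VectorXd::Zero(n), 0.0, x));
  }

  // The generator points thread_num's cost at coordinate i and hands the
  // pre-allocated program back to the parallel solve loop.
  auto prog_generator = [&](int64_t thread_num,
                            int64_t i) -> MathematicalProgram* {
    Eigen::VectorXd c = Eigen::VectorXd::Zero(n);
    c(i) = 1.0;
    costs[thread_num].evaluator()->UpdateCoefficients(c, 0.0);
    return &progs[thread_num];
  };

  // The teardown runs after each solve; here it only resets the cost, but it
  // is also the natural place for per-solve bookkeeping (cf. the unboundedness
  // check in convex_set.cc above).
  const std::function<void(MathematicalProgram**, const MathematicalProgramResult&,
                           int64_t, int64_t)>
      prog_teardown = [&](MathematicalProgram**, const MathematicalProgramResult&,
                          int64_t thread_num, int64_t) {
        costs[thread_num].evaluator()->UpdateCoefficients(
            Eigen::VectorXd::Zero(n), 0.0);
      };

  return SolveInParallel(
      prog_generator, /* range_start = */ 0, /* range_end = */ n,
      [](int64_t, int64_t) -> std::optional<Eigen::VectorXd> {
        return std::nullopt;  // No initial guesses for these LPs.
      },
      /* solver_options = */ nullptr, /* solver_id = */ std::nullopt,
      parallelism, /* dynamic_schedule = */ false, &prog_teardown);
}

}  // namespace example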