From fa122a17964523c16cbc39c194ae7f7a88e5dbff Mon Sep 17 00:00:00 2001
From: Christopher Schwan
Date: Mon, 17 Jun 2013 15:43:51 +0200
Subject: [PATCH] Add MPI parallelized PLAIN integrator

---
 include/Makefile.am          |   1 +
 include/hep/mc-mpi.hpp       |   1 +
 include/hep/mc/mpi_plain.hpp | 125 +++++++++++++++++++++++++++++++++++
 3 files changed, 127 insertions(+)
 create mode 100644 include/hep/mc/mpi_plain.hpp

diff --git a/include/Makefile.am b/include/Makefile.am
index 6551253..9c9fe86 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -3,6 +3,7 @@ nobase_include_HEADERS = \
 	hep/mc/mc_point.hpp \
 	hep/mc/mc_result.hpp \
 	hep/mc/mpi_datatype.hpp \
+	hep/mc/mpi_plain.hpp \
 	hep/mc/mpi_vegas.hpp \
 	hep/mc/plain.hpp \
 	hep/mc/vegas.hpp \
diff --git a/include/hep/mc-mpi.hpp b/include/hep/mc-mpi.hpp
index 8912787..48df3d7 100644
--- a/include/hep/mc-mpi.hpp
+++ b/include/hep/mc-mpi.hpp
@@ -20,6 +20,7 @@
  */
 
 #include <hep/mc/mpi_datatype.hpp>
+#include <hep/mc/mpi_plain.hpp>
 #include <hep/mc/mpi_vegas.hpp>
 
 namespace hep
diff --git a/include/hep/mc/mpi_plain.hpp b/include/hep/mc/mpi_plain.hpp
new file mode 100644
index 0000000..fdf9f76
--- /dev/null
+++ b/include/hep/mc/mpi_plain.hpp
@@ -0,0 +1,125 @@
+#ifndef HEP_MC_MPI_PLAIN_HPP
+#define HEP_MC_MPI_PLAIN_HPP
+
+/*
+ * hep-mc - A Template Library for Monte Carlo Integration
+ * Copyright (C) 2013 Christopher Schwan
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <hep/mc/mc_point.hpp>
+#include <hep/mc/mc_result.hpp>
+#include <hep/mc/mpi_datatype.hpp>
+
+#include <cstddef>
+#include <random>
+#include <vector>
+
+#include <mpi.h>
+
+namespace hep
+{
+
+/**
+ * \ingroup algorithms
+ *
+ * MPI-parallelized PLAIN Monte Carlo integrator. This function integrates
+ * `function` over the unit hypercube with the specified `dimensions` using
+ * `calls` function evaluations at randomly chosen points determined by
+ * `generator`. The generator is seeded differently on every MPI process so
+ * that each process samples an independent sequence of points.
+ *
+ * \param communicator The MPI communicator that is used to communicate
+ * between the different MPI processes.
+ * \param dimensions The number of parameters `function` accepts.
+ * \param calls The number of function calls that are used to obtain the
+ * result, distributed over all MPI processes.
+ * \param function The function that will be integrated over the hypercube.
+ * See \ref integrands for further explanation.
+ * \param generator The random number generator that will be used to generate
+ * random points from the hypercube. It is seeded differently on every MPI
+ * process.
+ */
+template <typename T, typename F, typename R = std::mt19937>
+mc_result<T> mpi_plain(
+	MPI_Comm communicator,
+	std::size_t dimensions,
+	std::size_t calls,
+	F&& function,
+	R&& generator = std::mt19937()
+) {
+	int rank = 0;
+	MPI_Comm_rank(communicator, &rank);
+	int world = 0;
+	MPI_Comm_size(communicator, &world);
+
+	// seed every random number generator differently
+	std::size_t r = rank * 10;
+	std::seed_seq sequence{r+0, r+1, r+2, r+3, r+4, r+5, r+6, r+7, r+8, r+9};
+	generator.seed(sequence);
+
+	// default-initialize sum and sum_of_squares
+	T buffer[2] = { T(), T() };
+	T& sum = buffer[0];
+	T& sum_of_squares = buffer[1];
+
+	// generates random numbers in the range [0,1)
+	std::uniform_real_distribution<T> distribution;
+
+	// compensation variable for Kahan summation
+	T compensation = T();
+
+	std::vector<T> random_numbers(dimensions);
+
+	// the number of function calls for each MPI process; the first
+	// (calls % world) processes perform one additional call
+	std::size_t const sub_calls = (calls / world) +
+		(static_cast<std::size_t>(rank) < (calls % world) ? 1 : 0);
+
+	// iterate over calls
+	for (std::size_t i = 0; i != sub_calls; ++i)
+	{
+		mc_point<T> point(calls, random_numbers);
+
+		// fill container with random numbers
+		for (std::size_t j = 0; j != dimensions; ++j)
+		{
+			random_numbers[j] = distribution(generator);
+		}
+
+		// evaluate function at the position specified in random_numbers
+		T const value = function(static_cast<mc_point<T> const>(point));
+
+		// perform Kahan summation 'sum += value' - this improves precision
+		// if T is single precision and many values are added
+		T const y = value - compensation;
+		T const t = sum + y;
+		compensation = (t - sum) - y;
+		sum = t;
+
+		sum_of_squares += value * value;
+	}
+
+	// combine the partial sums of all processes; every process receives
+	// the reduced values
+	MPI_Allreduce(
+		MPI_IN_PLACE,
+		&buffer,
+		2,
+		mpi_datatype<T>(),
+		MPI_SUM,
+		communicator
+	);
+
+	return mc_result<T>(calls, sum, sum_of_squares);
+}
+
+}
+
+#endif
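
---

For reference, a minimal usage sketch of the new integrator (not part of the patch). The member accesses `x.point`, `result.value()` and `result.error()` are assumptions based on the mc_point and mc_result headers included above and may be named differently; the call itself follows the signature introduced in this patch.

// example.cpp - hypothetical usage sketch for hep::mpi_plain
#include <cstdio>

#include <mpi.h>

#include <hep/mc-mpi.hpp>

int main(int argc, char* argv[])
{
	MPI_Init(&argc, &argv);

	// integrate f(x) = 3/2 * (x_0^2 + x_1^2) over [0,1]^2; the exact
	// result is 1
	auto result = hep::mpi_plain<double>(
		MPI_COMM_WORLD,
		2,        // dimensions
		10000000, // calls, shared among all MPI processes
		[](hep::mc_point<double> const& x) {
			return 1.5 * (x.point[0] * x.point[0] + x.point[1] * x.point[1]);
		}
	);

	int rank = 0;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	// MPI_Allreduce gives every process the same result; print it once
	if (rank == 0)
	{
		// value() and error() are assumed accessor names, see above
		std::printf("result: %f +- %f\n", result.value(), result.error());
	}

	MPI_Finalize();

	return 0;
}

With Open MPI this could be built and run with, e.g., `mpic++ example.cpp -o example` and `mpirun -np 4 ./example`; each rank then evaluates its share of the 10^7 calls and the reduced result is identical on every rank.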