Skip to content

Commit

Permalink
Added options for test_count in the tests missing it and increased internal benchmarking iterations to 1000
Browse files Browse the repository at this point in the history

Signed-off-by: Vedant <[email protected]>
  • Loading branch information
vrnimje committed Aug 23, 2024
1 parent f3620d7 commit b53373e
Show file tree
Hide file tree
Showing 10 changed files with 79 additions and 42 deletions.
3 changes: 2 additions & 1 deletion cmake/HPX_AddTest.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -301,6 +301,7 @@ function(add_hpx_performance_report_test subcategory name)
${ARGN}
RUN_SERIAL
"--hpx:print_cdash_img_path"
"--test_count=1000"
)
find_package(Python REQUIRED)

Expand All @@ -314,7 +315,7 @@ function(add_hpx_performance_report_test subcategory name)
${name}_cdash_results
COMMAND
sh -c
"${CMAKE_BINARY_DIR}/bin/${name}_test ${ARGN} --hpx:detailed_bench >${CMAKE_BINARY_DIR}/${name}.json"
"${CMAKE_BINARY_DIR}/bin/${name}_test ${ARGN} --test_count=1000 --hpx:detailed_bench >${CMAKE_BINARY_DIR}/${name}.json"
COMMAND
${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tools/perftests_plot.py
${CMAKE_BINARY_DIR}/${name}.json
Expand Down
9 changes: 2 additions & 7 deletions libs/core/algorithms/tests/performance/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -42,13 +42,8 @@ set(perftest_reports
)

set(foreach_report_perftest_PARAMETERS
THREADS_PER_LOCALITY
4
--hpx:ini=hpx.thread_queue.init_threads_count=100
--vector_size=104857
--work_delay=1
--chunk_size=0
--test_count=200
THREADS_PER_LOCALITY 4 --hpx:ini=hpx.thread_queue.init_threads_count=100
--vector_size=104857 --work_delay=1 --chunk_size=0
)

set(transform_reduce_scaling_perftest_PARAMETERS THREADS_PER_LOCALITY 4)
Expand Down
10 changes: 8 additions & 2 deletions libs/core/algorithms/tests/performance/benchmark_nth_element.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ std::mt19937 my_rand(0);

int hpx_main(hpx::program_options::variables_map& vm)
{
int test_count = vm["test_count"].as<int>();

hpx::util::perftests_init(vm, "benchmark_nth_element");

typedef std::less<uint64_t> compare_t;
Expand All @@ -43,7 +45,7 @@ int hpx_main(hpx::program_options::variables_map& vm)

hpx::util::perftests_report("hpx::nth_element, size: " +
std::to_string(NELEM) + ", step: " + std::to_string(1),
"seq", 100, [&] {
"seq", test_count, [&] {
for (uint64_t i = 0; i < NELEM; i++)
{
B = A;
Expand All @@ -66,7 +68,7 @@ int hpx_main(hpx::program_options::variables_map& vm)

hpx::util::perftests_report("hpx::nth_element, size: " +
std::to_string(NELEM) + ", step: " + std::to_string(STEP),
"seq", 100, [&] {
"seq", test_count, [&] {
for (uint64_t i = 0; i < NELEM; i += STEP)
{
B = A;
Expand All @@ -86,6 +88,10 @@ int main(int argc, char* argv[])
options_description desc_commandline(
"Usage: " HPX_APPLICATION_STRING " [options]");

desc_commandline.add_options()("test_count",
hpx::program_options::value<int>()->default_value(10),
"number of tests to be averaged (default: 10)");

hpx::util::perftests_cfg(desc_commandline);

std::vector<std::string> cfg;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ std::mt19937 my_rand(0);

int hpx_main(hpx::program_options::variables_map& vm)
{
int test_count = vm["test_count"].as<int>();

hpx::util::perftests_init(vm, "benchmark_nth_element_parallel");

typedef std::less<uint64_t> compare_t;
Expand All @@ -44,7 +46,7 @@ int hpx_main(hpx::program_options::variables_map& vm)

hpx::util::perftests_report("hpx::nth_element, size: " +
std::to_string(NELEM) + ", step: " + std::to_string(1),
"par", 100, [&] {
"par", test_count, [&] {
for (uint64_t i = 0; i < NELEM; ++i)
{
B = A;
Expand All @@ -68,7 +70,7 @@ int hpx_main(hpx::program_options::variables_map& vm)

hpx::util::perftests_report("hpx::nth_element, size: " +
std::to_string(NELEM) + ", step: " + std::to_string(STEP),
"par", 100, [&] {
"par", test_count, [&] {
for (uint64_t i = 0; i < NELEM; i += STEP)
{
B = A;
Expand All @@ -88,6 +90,10 @@ int main(int argc, char* argv[])
options_description desc_commandline(
"Usage: " HPX_APPLICATION_STRING " [options]");

desc_commandline.add_options()("test_count",
hpx::program_options::value<int>()->default_value(10),
"number of tests to be averaged (default: 10)");

hpx::util::perftests_cfg(desc_commandline);

std::vector<std::string> cfg;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,8 @@ int hpx_main(hpx::program_options::variables_map& vm)
if (vm.count("seed"))
seed = vm["seed"].as<unsigned int>();

int test_count = vm["test_count"].as<int>();

hpx::util::perftests_init(vm, "benchmark_partial_sort");

typedef std::less<std::uint64_t> compare_t;
Expand All @@ -147,7 +149,7 @@ int hpx_main(hpx::program_options::variables_map& vm)

hpx::util::perftests_report("hpx::partial_sort, size: " +
std::to_string(NELEM) + ", step: " + std::to_string(1),
"seq", 100, [&] {
"seq", test_count, [&] {
for (uint32_t i = 0; i < NELEM; i++)
{
B = A;
Expand All @@ -170,7 +172,8 @@ int hpx_main(hpx::program_options::variables_map& vm)
std::shuffle(A.begin(), A.end(), gen);

hpx::util::perftests_report(
"hpx::partial_sort, size: " + std::to_string(NELEM), "seq", 100, [&] {
"hpx::partial_sort, size: " + std::to_string(NELEM), "seq", test_count,
[&] {
B = A;
hpx::partial_sort(B.begin(), B.end(), B.end(), compare_t());
});
Expand All @@ -188,7 +191,9 @@ int main(int argc, char* argv[])
"Usage: " HPX_APPLICATION_STRING " [options]");

desc_commandline.add_options()("seed,s", value<unsigned int>(),
"the random number generator seed to use for this run");
"the random number generator seed to use for this run")("test_count",
value<int>()->default_value(10),
"number of tests to be averaged (default: 10)");

// By default this test should run on all available cores
std::vector<std::string> const cfg = {"hpx.os_threads=all"};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ int hpx_main(hpx::program_options::variables_map& vm)
if (vm.count("seed"))
seed = vm["seed"].as<unsigned int>();

int test_count = vm["test_count"].as<int>();

hpx::util::perftests_init(vm, "benchmark_partial_sort_parallel");

// test_main();
Expand All @@ -46,7 +48,7 @@ int hpx_main(hpx::program_options::variables_map& vm)
std::shuffle(A.begin(), A.end(), gen);
hpx::util::perftests_report("hpx::partial_sort, size: " +
std::to_string(NELEM) + ", step: " + std::to_string(1),
"par", 100, [&] {
"par", test_count, [&] {
for (uint32_t i = 0; i < NELEM; ++i)
{
B = A;
Expand All @@ -70,7 +72,8 @@ int hpx_main(hpx::program_options::variables_map& vm)

std::shuffle(A.begin(), A.end(), gen);
hpx::util::perftests_report(
"hpx::partial_sort, size: " + std::to_string(NELEM), "par", 100, [&] {
"hpx::partial_sort, size: " + std::to_string(NELEM), "par", test_count,
[&] {
B = A;
hpx::partial_sort(::hpx::execution::par, B.begin(), B.end(),
B.end(), compare_t());
Expand All @@ -81,7 +84,7 @@ int hpx_main(hpx::program_options::variables_map& vm)
uint32_t STEP = NELEM / 100;
hpx::util::perftests_report("hpx::partial_sort, size: " +
std::to_string(NELEM) + ", step: " + std::to_string(STEP),
"par", 100, [&] {
"par", test_count, [&] {
for (uint32_t i = 0; i < NELEM; i += STEP)
{
B = A;
Expand All @@ -103,7 +106,9 @@ int main(int argc, char* argv[])
"Usage: " HPX_APPLICATION_STRING " [options]");

desc_commandline.add_options()("seed,s", value<unsigned int>(),
"the random number generator seed to use for this run");
"the random number generator seed to use for this run")("test_count",
value<int>()->default_value(10),
"number of tests to be averaged (default: 10)");

// By default this test should run on all available cores
std::vector<std::string> const cfg = {"hpx.os_threads=all"};
Expand Down
55 changes: 37 additions & 18 deletions libs/core/testing/src/performance.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@ namespace hpx::util {
namespace detail {

#if defined(HPX_HAVE_NANOBENCH)
constexpr int nanobench_epochs = 100;
constexpr int nanobench_warmup = 40;

char const* nanobench_hpx_simple_template() noexcept
Expand Down Expand Up @@ -94,7 +93,6 @@ average: {{average(elapsed)}}
static ankerl::nanobench::Config cfg;

cfg.mWarmup = nanobench_warmup;
cfg.mNumEpochs = nanobench_epochs;

return b.config(cfg);
}
Expand Down Expand Up @@ -171,7 +169,7 @@ average: {{average(elapsed)}}
strm << "]\n";
strm << "}\n";
}
else
else if (print_cdash_img)
{
strm << "Results:\n\n";
for (auto&& item : obj.m_map)
Expand All @@ -188,15 +186,36 @@ average: {{average(elapsed)}}
strm.precision(
std::numeric_limits<long double>::max_digits10 - 1);
strm << std::scientific << "average: " << average / series
<< "\n\n";
<< "\n";
strm << "<CTestMeasurement type=\"numeric/double\" name=\""
<< std::get<0>(item.first) << "_"
<< std::get<1>(item.first) << "\">" << std::scientific
<< average / series << "</CTestMeasurement>\n\n";
}
if (print_cdash_img)
for (std::size_t i = 0; i < obj.m_map.size(); i++)
strm << "<CTestMeasurementFile type=\"image/png\" "
"name=\"perftest\" >"
<< "./" << test_name_ << "_" << i
<< ".png</CTestMeasurementFile>\n";
}
else
{
strm << "Results:\n\n";
for (auto&& item : obj.m_map)
{
for (std::size_t i = 0; i < obj.m_map.size(); i++)
strm << "<CTestMeasurementFile type=\"image/png\" "
"name=\"perftest\" >"
<< "./" << test_name_ << "_" << i
<< ".png</CTestMeasurementFile>\n";
long double average = static_cast<long double>(0.0);
int series = 0;
strm << "name: " << std::get<0>(item.first) << "\n";
strm << "executor: " << std::get<1>(item.first) << "\n";
for (long double const val : item.second)
{
++series;
average += val;
}
strm.precision(
std::numeric_limits<long double>::max_digits10 - 1);
strm << std::scientific << "average: " << average / series
<< "\n\n";
}
}
return strm;
Expand All @@ -218,13 +237,10 @@ average: {{average(elapsed)}}
if (steps == 0)
return;

std::size_t const steps_per_epoch =
steps / detail::nanobench_epochs + 1;

detail::bench()
.name(name)
.context("executor", exec)
.minEpochIterations(steps_per_epoch)
.epochs(steps)
.run(test);
}

Expand All @@ -236,11 +252,13 @@ average: {{average(elapsed)}}
detail::bench().render(templ, strm);
if (!detailed_ && print_cdash_img)
{
for (long unsigned int i = 0; i < detail::bench().results().size(); i++)
for (long unsigned int i = 0; i < detail::bench().results().size();
i++)
{
strm << "<CTestMeasurementFile type=\"image/png\" "
"name=\"perftest\">"
<< "./" << test_name_ << "_" << i << ".png</CTestMeasurementFile>\n";
<< "./" << test_name_ << "_" << i
<< ".png</CTestMeasurementFile>\n";
}
}
}
Expand All @@ -256,8 +274,9 @@ average: {{average(elapsed)}}
{
if (detailed_)
perftests_print_times(detail::nanobench_hpx_template(), std::cout);
else if (print_cdash_img)
perftests_print_times(detail::nanobench_hpx_cdash_template(), std::cout);
else if (print_cdash_img)
perftests_print_times(
detail::nanobench_hpx_cdash_template(), std::cout);
else
perftests_print_times(
detail::nanobench_hpx_simple_template(), std::cout);
Expand Down
4 changes: 2 additions & 2 deletions tests/performance/local/future_overhead_report.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ int hpx_main(variables_map& vm)
numa_sensitive = 0;

bool test_all = (vm.count("test-all") > 0);
const int repetitions = vm["repetitions"].as<int>();
const int repetitions = vm["test_count"].as<int>();

if (vm.count("info"))
info_string = vm["info"].as<std::string>();
Expand Down Expand Up @@ -196,7 +196,7 @@ int main(int argc, char* argv[])
"number of iterations in the delay loop")

("test-all", "run all benchmarks")
("repetitions", value<int>()->default_value(1),
("test_count", value<int>()->default_value(1),
"number of repetitions of the full benchmark")

("info", value<std::string>()->default_value("no-info"),
Expand Down
4 changes: 2 additions & 2 deletions tests/performance/local/stream_report.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -439,7 +439,7 @@ auto run_benchmark(std::size_t warmup_iterations, std::size_t iterations,
int hpx_main(hpx::program_options::variables_map& vm)
{
std::size_t vector_size = vm["vector_size"].as<std::size_t>();
std::size_t iterations = vm["iterations"].as<std::size_t>();
std::size_t iterations = vm["test_count"].as<std::size_t>();
std::size_t warmup_iterations = vm["warmup_iterations"].as<std::size_t>();
std::size_t chunk_size = vm["chunk_size"].as<std::size_t>();
hpx::util::perftests_init(vm, "stream_report");
Expand Down Expand Up @@ -523,7 +523,7 @@ int main(int argc, char* argv[])
( "vector_size",
hpx::program_options::value<std::size_t>()->default_value(1024),
"size of vector (default: 1024)")
( "iterations",
( "test_count",
hpx::program_options::value<std::size_t>()->default_value(10),
"number of iterations to repeat each test. (default: 10)")
( "warmup_iterations",
Expand Down
2 changes: 1 addition & 1 deletion tools/perftests_plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ def median_statistic(sample1, sample2, axis=-1):
category.append("current")
samples.append(test1["series"])

data = (test2["series"], test1["series"]) / np.median(test1["series"])
data = (test2["series"] / np.median(test1["series"]), test1["series"] / np.median(test1["series"]))
res = scipy.stats.bootstrap(data, median_statistic, method='basic', random_state=rng)

mean2 = np.median(test2["series"])
Expand Down

0 comments on commit b53373e

Please sign in to comment.